# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import json
import logging
import math
import os
import sys
from io import open

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .file_utils import cached_path
from .model_utils import Conv1D, CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig, PreTrainedModel, prune_conv1d_layer
from .modeling import BertLayerNorm as LayerNorm

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}

def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
    """ Load the original OpenAI GPT pre-trained weights (stored as NumPy arrays) into a PyTorch model.
    """
    import re
    import numpy as np

    if '.ckpt' in openai_checkpoint_folder_path:
        openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)

    logger.info("Loading weights from {}".format(openai_checkpoint_folder_path))

    names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
    shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]

    # This was used when we had a single embedding matrix for positions and tokens
    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
    # del init_params[1]
    init_params = [arr.squeeze() for arr in init_params]

    try:
        assert model.tokens_embed.weight.shape == init_params[1].shape
        assert model.positions_embed.weight.shape == init_params[0].shape
    except AssertionError as e:
        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
        e.args += (model.positions_embed.weight.shape, init_params[0].shape)
        raise

    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
    model.positions_embed.weight.data = torch.from_numpy(init_params[0])
    names.pop(0)
    # Pop position and token embedding arrays
    init_params.pop(0)
    init_params.pop(0)

    for name, array in zip(names, init_params):
        name = name[6:]  # skip "model/"
        assert name[-2:] == ":0"
        name = name[:-2]
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                scope_names = re.split(r'(\d+)', m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif scope_names[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif scope_names[0] == 'w':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
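
# A minimal usage sketch for the loader above (hedged: the folder name and the
# surrounding setup are illustrative, not part of this module):
#
#     config = OpenAIGPTConfig()
#     model = OpenAIGPTModel(config)
#     # the folder must contain parameters_names.json, params_shapes.json and params_0.npy ... params_9.npy
#     model = load_tf_weights_in_openai_gpt(model, config, "./openai_gpt_checkpoint")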


def gelu(x):
    # Gaussian Error Linear Unit, tanh approximation (as used in the original GPT / BERT code)
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))


def swish(x):
    return x * torch.sigmoid(x)


# All entries are plain callables (tensor -> tensor)
ACT_FNS = {"relu": nn.functional.relu, "swish": swish, "gelu": gelu}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class to store the configuration of an `OpenAIGPTModel`.
    """
    pretrained_config_archive_map = PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(
        self,
        vocab_size_or_config_json_file=40478,
        n_special=0,
        n_positions=512,
        n_ctx=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        predict_special_tokens=True,
        **kwargs
    ):
        """Constructs OpenAIGPTConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `OpenAIGPTModel` or a configuration json file.
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLS]', ...)
            n_positions: Number of positional embeddings.
            n_ctx: Size of the causal mask (usually same as n_positions).
            n_embd: Dimensionality of the embeddings and hidden states.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            afn: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            resid_pdrop: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attn_pdrop: The dropout ratio for the attention
                probabilities.
            embd_pdrop: The dropout ratio for the embeddings.
            layer_norm_epsilon: epsilon to use in the layer norm layers
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            predict_special_tokens: whether to predict special tokens (when the model has an LM head)
        """
        super(OpenAIGPTConfig, self).__init__(**kwargs)

        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_special = n_special
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.afn = afn
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
            self.predict_special_tokens = predict_special_tokens
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @property
    def total_tokens_embeddings(self):
        return self.vocab_size + self.n_special

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
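
# Example (sketch): the combined vocabulary used to size the token embedding
# matrix is the pre-trained vocabulary plus the learned special tokens.
#
#     config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478, n_special=2)
#     assert config.total_tokens_embeddings == 40480  # vocab_size + n_special
#     assert config.hidden_size == config.n_embd      # BERT-style aliases defined above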


class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale

        self.output_attentions = config.output_attentions

        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)

    def _attn(self, q, k, v, head_mask=None):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        # w = w * self.bias + -1e9 * (1 - self.bias)  # TF implem method: mask_attn_weights
        # XD: self.b may be larger than w, so we need to crop it
        b = self.bias[:, :, : w.size(-2), : w.size(-1)]
        w = w * b + -1e9 * (1 - b)

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask

        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, x, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)

        attn_outputs = self._attn(query, key, value, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        outputs = [a] + attn_outputs[1:]
        return outputs  # a, (attentions)
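
    # Example (sketch): pruning physically removes the selected heads' slices from
    # c_attn/c_proj, so the module shrinks instead of merely masking at runtime.
    #
    #     attn = Attention(nx=768, n_ctx=512, config=config, scale=True)
    #     attn.prune_heads([0, 2])        # drop heads 0 and 2
    #     assert attn.n_head == config.n_head - 2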


class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = ACT_FNS[config.afn]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)

    def forward(self, x, head_mask=None):
        attn_outputs = self.attn(x, head_mask=head_mask)
        a = attn_outputs[0]

        # GPT-1 is a post-layer-norm transformer: LayerNorm is applied
        # after each residual connection (unlike GPT-2, which normalizes first)
        n = self.ln_1(x + a)
        m = self.mlp(n)
        h = self.ln_2(n + m)

        outputs = [h] + attn_outputs[1:]
        return outputs


class OpenAIGPTLMHead(nn.Module):
    """ Language Model Head for the transformer """

    def __init__(self, model_embeddings_weights, config):
        super(OpenAIGPTLMHead, self).__init__()
        self.n_embd = config.n_embd
        self.vocab_size = config.vocab_size
        self.predict_special_tokens = config.predict_special_tokens
        embed_shape = model_embeddings_weights.shape
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
        self.set_embeddings_weights(model_embeddings_weights)

    def set_embeddings_weights(self, model_embeddings_weights, predict_special_tokens=True):
        self.predict_special_tokens = predict_special_tokens
        embed_shape = model_embeddings_weights.shape
        self.decoder.weight = model_embeddings_weights  # Tied weights

    def forward(self, hidden_state):
        lm_logits = self.decoder(hidden_state)
        if not self.predict_special_tokens:
            lm_logits = lm_logits[..., :self.vocab_size]
        return lm_logits
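
# Weight tying (sketch): the LM head reuses the token embedding matrix as the
# decoder weight, so input and output embeddings stay shared.
#
#     lm_head = OpenAIGPTLMHead(model.tokens_embed.weight, config)
#     assert lm_head.decoder.weight is model.tokens_embed.weight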


class OpenAIGPTMultipleChoiceHead(nn.Module):
    """ Classifier Head for the transformer """

    def __init__(self, config):
        super(OpenAIGPTMultipleChoiceHead, self).__init__()
        self.n_embd = config.n_embd
        self.dropout = nn.Dropout2d(config.resid_pdrop)  # To reproduce the noise_shape parameter of TF implementation
        self.linear = nn.Linear(config.n_embd, 1)

        nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.normal_(self.linear.bias, 0)

    def forward(self, hidden_states, mc_token_ids=None):
        """ Extract the classification-token hidden state and project it using self.linear
            hidden_states: hidden states of shape (bsz, num_choices, seq_length, hidden_size)
            mc_token_ids: [optional] index of the classification token, shape (bsz, num_choices)
            if mc_token_ids is None we take the last token of the sequence as the classification token
        """
        if mc_token_ids is None:
            mc_token_ids = torch.full_like(hidden_states[:, :, :1, :], hidden_states.shape[2] - 1, dtype=torch.long)
        else:
            mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
        # mc_token_ids now has shape (bsz, num_choices, 1, hidden_size)
        multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
        # (bsz, num_choices, hidden_size)
        multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
        multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
        # (bsz, num_choices)
        return multiple_choice_logits
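
# Shape sketch (illustrative values): with hidden_states of shape
# (bsz=2, num_choices=4, seq_length=7, hidden_size=768) and mc_token_ids of
# shape (2, 4), the head gathers one hidden state per choice and returns
# multiple_choice_logits of shape (2, 4).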


class OpenAIGPTPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = OpenAIGPTConfig
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_openai_gpt
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(OpenAIGPTPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        Instantiate an OpenAIGPTPreTrainedModel from a pre-trained model file or a PyTorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load, selected in the list of: `openai-gpt`
                - a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of an OpenAIGPTModel instance
                - a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
                    . a series of NumPy files containing OpenAI TensorFlow trained weights
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional input for the specific OpenAI-GPT class
        """
        num_special_tokens = kwargs.pop('num_special_tokens', None)

        model = super(OpenAIGPTPreTrainedModel, cls).from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)

        # Add additional embeddings for special tokens if needed
        # This step also makes sure we are still sharing the output and input embeddings after loading weights
        model.set_num_special_tokens(num_special_tokens)
        return model
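
# Loading sketch (assumes network access to the S3 archive above; the
# num_special_tokens value is illustrative):
#
#     model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt', num_special_tokens=2)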


class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:

        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        `config`: an OpenAIGPTConfig class instance with the configuration to build a new model
        `config.output_attentions`: If True, also output the attention weights computed by the model at each layer. Default: False
        `config.output_hidden_states`: If True, also output the hidden states of all layers. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with values between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 0.0 => head is masked, 1.0 => head is not masked.

    Outputs:
        `hidden_states`: the encoded hidden states at the top of the model
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTModel(config)
    hidden_states = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        self.tokens_embed = nn.Embedding(config.total_tokens_embeddings, config.n_embd)
        self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])

        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens=None):
        " Update input embeddings with a new embedding matrix if needed "
        if num_special_tokens is None or self.config.n_special == num_special_tokens:
            return
        # Update config
        self.config.n_special = num_special_tokens
        # Build new embeddings and initialize all new embeddings (in particular the special tokens)
        old_embed = self.tokens_embed
        self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
        self.tokens_embed.to(old_embed.weight.device)
        self.init_weights(self.tokens_embed)
        # Copy word embeddings from the previous weights
        self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, head_mask=None):
        if position_ids is None:
            # This was used when we had a single embedding matrix for position and token embeddings
            # start = self.config.vocab_size + self.config.n_special
            # end = start + input_ids.size(-1)
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.tokens_embed(input_ids)
        position_embeds = self.positions_embed(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.tokens_embed(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        all_attentions = []
        all_hidden_states = []
        for i, block in enumerate(self.h):
            if self.output_hidden_states:
                all_hidden_states.append(hidden_states.view(*output_shape))

            outputs = block(hidden_states, head_mask[i])
            hidden_states = outputs[0]
            if self.output_attentions:
                all_attentions.append(outputs[1])

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states.append(hidden_states.view(*output_shape))

        outputs = [hidden_states.view(*output_shape)]
        if self.output_hidden_states:
            outputs.append(all_hidden_states)
        if self.output_attentions:
            outputs.append(all_attentions)
        return outputs  # last hidden state, (all hidden states), (all attentions)
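
    # head_mask sketch (keep = 1.0, mask = 0.0; values are illustrative):
    #
    #     head_mask = torch.ones(config.n_layer, config.n_head)
    #     head_mask[0, 0] = 0.0  # silence the first head of the first layer
    #     hidden_states = model(input_ids, head_mask=head_mask)[0]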


class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:

        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        `config`: an OpenAIGPTConfig class instance with the configuration to build a new model
        `config.output_attentions`: If True, also output the attention weights computed by the model at each layer. Default: False
        `config.output_hidden_states`: If True, also output the hidden states of all layers. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with values between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 0.0 => head is masked, 1.0 => head is not masked.

    Outputs:
        if `lm_labels` is not `None`:
            Outputs the language modeling loss.
        else:
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
                (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTLMHeadModel(config)
    lm_logits = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTLMHeadModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
        """ Update input and output embeddings with a new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight, predict_special_tokens=predict_special_tokens)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, position_ids, token_type_ids, head_mask)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)

        outputs = [lm_logits] + transformer_outputs[1:]
        if lm_labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = [loss] + outputs

        return outputs  # (loss), lm_logits, (all hidden states), (all attentions)
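
    # Label-shift sketch: for input tokens [t0, t1, t2], position i is trained to
    # predict token i+1, so logits for [t0, t1] are compared against labels
    # [t1, t2]; labels set to -1 are ignored by the loss.
    #
    #     lm_labels = input_ids.clone()           # predict the input sequence itself
    #     loss = model(input_ids, lm_labels=lm_labels)[0]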


class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:

        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        `config`: an OpenAIGPTConfig class instance with the configuration to build a new model
        `config.output_attentions`: If True, also output the attention weights computed by the model at each layer. Default: False
        `config.output_hidden_states`: If True, also output the hidden states of all layers. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
            indices selected in the range [0, total_tokens_embeddings[
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., total_tokens_embeddings]
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with values between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 0.0 => head is masked, 1.0 => head is not masked.

    Outputs:
        if `lm_labels` and `multiple_choice_labels` are not `None`:
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
        else: a tuple with
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choices, seq length)
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
    lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
        """ Update input and output embeddings with a new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight, predict_special_tokens=predict_special_tokens)

    def forward(self, input_ids, mc_token_ids=None, lm_labels=None, mc_labels=None, token_type_ids=None,
                position_ids=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, position_ids, token_type_ids, head_mask)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)

        outputs = [lm_logits, mc_logits] + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
                            mc_labels.view(-1))
            outputs = [loss] + outputs
        if lm_labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = [loss] + outputs

        return outputs  # (lm loss), (mc loss), lm logits, mc logits, (all hidden_states), (attentions)
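
    # Training sketch for the double-heads model (hedged: tensor values are illustrative):
    #
    #     lm_labels = input_ids.clone()               # (bsz, num_choices, seq length)
    #     mc_labels = torch.LongTensor([0])           # (bsz,)
    #     losses = model(input_ids, mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
    #     lm_loss, mc_loss = losses[0], losses[1]
    #     (lm_loss + mc_loss).backward()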