# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""

import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .file_utils import cached_path
from .modeling import BertLayerNorm as LayerNorm

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt.tar.gz"}
CONFIG_NAME = "openai_gpt_config.json"
WEIGHTS_NAME = "pytorch_model.bin"

def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
    """ Load TF pre-trained weights into a PyTorch model (from NumPy arrays here)
    """
    import re
    import numpy as np

    print("Loading weights...")
    names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
    shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]

    # This was used when we had a single embedding matrix for positions and tokens
    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
    # del init_params[1]
    init_params = [arr.squeeze() for arr in init_params]

    try:
        assert model.tokens_embed.weight.shape == init_params[1].shape
        assert model.positions_embed.weight.shape == init_params[0].shape
    except AssertionError as e:
        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
        e.args += (model.positions_embed.weight.shape, init_params[0].shape)
        raise

    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
    model.positions_embed.weight.data = torch.from_numpy(init_params[0])
    names.pop(0)
    # Pop position and token embedding arrays
    init_params.pop(0)
    init_params.pop(0)

    for name, array in zip(names, init_params):  # names[1:n_transfer], init_params[1:n_transfer]
        name = name[6:]  # skip "model/"
        assert name[-2:] == ":0"
        name = name[:-2]
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'w':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model

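# Illustrative usage sketch (added comment, not part of the original file): converting the original
# OpenAI TensorFlow checkpoint (the folder holding parameters_names.json, params_shapes.json and
# the params_*.npy shards read above) into this PyTorch model. The path below is a placeholder.
#
#     config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478)
#     model = OpenAIGPTModel(config)
#     model = load_tf_weights_in_openai_gpt(model, "/path/to/openai/checkpoint")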

def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))


def swish(x):
    return x * torch.sigmoid(x)


ACT_FNS = {"relu": nn.functional.relu, "swish": swish, "gelu": gelu}

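# Note (added comment, not part of the original file): `gelu` above is the tanh approximation of the
# Gaussian Error Linear Unit used by the original OpenAI GPT code; `ACT_FNS` maps the `afn` value of
# OpenAIGPTConfig to the corresponding activation, e.g. (illustrative):
#
#     act = ACT_FNS["gelu"]            # or "relu" / "swish"
#     y = act(torch.randn(2, 4))       # element-wise, output has the same shape as the input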

class OpenAIGPTConfig(object):
    """Configuration class to store the configuration of a `OpenAIGPTModel`.
    """

    def __init__(
        self,
        vocab_size_or_config_json_file=40478,
        n_special=0,
        n_positions=512,
        n_ctx=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        initializer_range=0.02,
    ):
        """Constructs OpenAIGPTConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `OpenAIGPTModel` or a configuration json file.
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLS]', ...)
            n_positions: Number of positional embeddings.
            n_ctx: Size of the causal mask (usually same as n_positions).
            n_embd: Dimensionality of the embeddings and hidden states.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            afn: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            resid_pdrop: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attn_pdrop: The dropout ratio for the attention
                probabilities.
            embd_pdrop: The dropout ratio for the embeddings.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_special = n_special
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.afn = afn
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @property
    def total_tokens_embeddings(self):
        return self.vocab_size + self.n_special

    @classmethod
    def from_dict(cls, json_object):
        """Constructs an `OpenAIGPTConfig` from a Python dictionary of parameters."""
        config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs an `OpenAIGPTConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

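# Illustrative usage sketch (added comment, not part of the original file): an OpenAIGPTConfig can be
# built from explicit arguments or from a JSON file; the path below is a placeholder.
#
#     config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478, n_special=2)
#     assert config.total_tokens_embeddings == 40478 + 2
#     config = OpenAIGPTConfig.from_json_file("/path/to/openai_gpt_config.json")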

class Conv1D(nn.Module):
    def __init__(self, nf, rf, nx):
        super(Conv1D, self).__init__()
        self.rf = rf
        self.nf = nf
        if rf == 1:  # faster 1x1 conv
            w = torch.empty(nx, nf)
            nn.init.normal_(w, std=0.02)
            self.weight = Parameter(w)
            self.bias = Parameter(torch.zeros(nf))
        else:  # was used to train LM
            raise NotImplementedError

    def forward(self, x):
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x

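# Shape note (added comment, not part of the original file): with rf == 1, Conv1D is a linear
# projection over the last dimension, e.g. (illustrative) Conv1D(nf=2304, rf=1, nx=768) maps
# [batch, seq, 768] to [batch, seq, 2304], which is how c_attn below produces query/key/value.
#
#     c_attn = Conv1D(2304, 1, 768)
#     y = c_attn(torch.randn(2, 5, 768))   # y.shape == torch.Size([2, 5, 2304])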

class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, 1, nx)
        self.c_proj = Conv1D(n_state, 1, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def _attn(self, q, k, v):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        # w = w * self.bias + -1e9 * (1 - self.bias)  # TF implem method: mask_attn_weights
        # XD: self.bias may be larger than w, so we need to crop it
        b = self.bias[:, :, : w.size(-2), : w.size(-1)]
        w = w * b + -1e9 * (1 - b)

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        return torch.matmul(w, v)

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, x):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        a = self._attn(query, key, value)
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        return a

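# Shape note (added comment, not part of the original file): inside Attention.forward, split_heads
# turns [batch, seq, n_embd] into [batch, n_head, seq, head_dim] (keys are further transposed to
# [batch, n_head, head_dim, seq]), _attn applies the causal mask stored in self.bias, and
# merge_heads restores [batch, seq, n_embd], so the attention block preserves the input shape.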

class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, 1, nx)
        self.c_proj = Conv1D(nx, 1, n_state)
        self.act = ACT_FNS[config.afn]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_1 = LayerNorm(nx)
        self.mlp = MLP(4 * nx, config)
        self.ln_2 = LayerNorm(nx)

    def forward(self, x):
        a = self.attn(x)
        n = self.ln_1(x + a)
        m = self.mlp(n)
        h = self.ln_2(n + m)
        return h


class OpenAIGPTLMHead(nn.Module):
    """ Language Model Head for the transformer """

    def __init__(self, model_embeddings_weights, config):
        super(OpenAIGPTLMHead, self).__init__()
        self.n_embd = config.n_embd
        self.set_embeddings_weights(model_embeddings_weights)

    def set_embeddings_weights(self, model_embeddings_weights):
        embed_shape = model_embeddings_weights.shape
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
        self.decoder.weight = model_embeddings_weights  # Tied weights

    def forward(self, hidden_state):
        # Truncated Language modeling logits (we remove the last token)
        # h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
        lm_logits = self.decoder(hidden_state)
        return lm_logits

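# Note (added comment, not part of the original file): OpenAIGPTLMHead ties its decoder weight to the
# input token embedding matrix, so after resizing the embeddings (set_num_special_tokens) the model
# classes below call set_embeddings_weights again to keep the output projection in sync.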

class OpenAIGPTMultipleChoiceHead(nn.Module):
    """ Classifier Head for the transformer """

    def __init__(self, config):
        super(OpenAIGPTMultipleChoiceHead, self).__init__()
        self.n_embd = config.n_embd
        # self.multiple_choice_token = multiple_choice_token
        self.dropout = nn.Dropout2d(config.resid_pdrop)  # To reproduce the noise_shape parameter of TF implementation
        self.linear = nn.Linear(config.n_embd, 1)

        nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.normal_(self.linear.bias, 0)

    def forward(self, hidden_states, mc_token_mask):
        # Classification logits
        # hidden_states = hidden_states.view(-1, self.n_embd)
        # mc_token_mask = mc_token_mask.view(-1, 1).expand_as(hidden_states)
        mc_token_mask = mc_token_mask.float()
        multiple_choice_h = hidden_states * mc_token_mask.unsqueeze(-1)
        multiple_choice_h = multiple_choice_h.sum(dim=-2)
        # flat = x[..., 0].contiguous().view(-1)
        # multiple_choice_h = multiple_choice_h[flat == self.multiple_choice_token, :]
        # multiple_choice_h = multiple_choice_h.view(-1, x.size(1), self.n_embd, 1)
        # # This double transposition is there to replicate the behavior
        # # of the noise_shape argument in the tensorflow
        # # implementation.  For more details, see
        # # https://github.com/huggingface/pytorch-openai-transformer-lm/issues/11
        # multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
        # multiple_choice_h = multiple_choice_h.contiguous().view(-1, self.n_embd)
        multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
        return multiple_choice_logits

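# Illustrative sketch (added comment, not part of the original file): mc_token_mask marks, for each
# sequence, the position whose hidden state OpenAIGPTMultipleChoiceHead uses for classification.
# For two sequences of length 3 with the classification token at positions 2 and 1 respectively:
#
#     mc_token_mask = torch.LongTensor([[0, 0, 1], [0, 1, 0]])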

class OpenAIGPTPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    def __init__(self, config, *inputs, **kwargs):
        super(OpenAIGPTPreTrainedModel, self).__init__()
        if not isinstance(config, OpenAIGPTConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                )
            )
        self.config = config

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def set_num_special_tokens(self, num_special_tokens):
        pass

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name, num_special_tokens=None, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
    ):
        """
        Instantiate a OpenAIGPTPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `openai-gpt`
                - a path or url to a pretrained model archive containing:
                    . `openai_gpt_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance
                - a path or url to a pretrained model archive containing:
                    . `openai_gpt_config.json` a configuration file for the model
                    . a series of NumPy files containing OpenAI TensorFlow trained weights
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional inputs for the specific OpenAI GPT class
        """
        if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
        else:
            archive_file = pretrained_model_name
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except FileNotFoundError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file
                )
            )
            return None
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, "r:gz") as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        config = OpenAIGPTConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        if from_tf:
            # Directly load from a TensorFlow checkpoint (stored as NumPy array)
            return load_tf_weights_in_openai_gpt(model, serialization_dir)

        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if key.endswith(".g"):
                new_key = key[:-2] + ".weight"
            elif key.endswith(".b"):
                new_key = key[:-2] + ".bias"
            elif key.endswith(".w"):
                new_key = key[:-2] + ".weight"
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, "_metadata", None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=""):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
            )
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + ".")

        start_model = model
        if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
            start_model = model.transformer
        load(start_model, prefix="")

        if len(missing_keys) > 0:
            logger.info(
                "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
            )
        if len(unexpected_keys) > 0:
            logger.info(
                "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
            )
        if len(error_msgs) > 0:
            raise RuntimeError(
                "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
            )
        # Add additional embeddings for special tokens if needed
        # This step also makes sure we are still sharing the output and input embeddings after loading weights
        model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
        return model

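# Illustrative usage sketch (added comment, not part of the original file): loading the released
# `openai-gpt` weights through from_pretrained above; the number of special tokens is an example value.
#
#     model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-gpt", num_special_tokens=3)
#     model.eval()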

class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:

        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: an OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.

    Outputs:
        `hidden_states`: the encoded-hidden-states at the top of the model
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTModel(config)
    hidden_states = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTModel, self).__init__(config)
        num_tokens = config.vocab_size + config.n_special
        self.tokens_embed = nn.Embedding(num_tokens, config.n_embd)
        self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        block = Block(config.n_ctx, config, scale=True)
        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])

        self.apply(self.init_weights)
        # nn.init.normal_(self.embed.weight, std=0.02)

    def set_num_special_tokens(self, num_special_tokens):
        " Update input embeddings with new embedding matrix if needed "
        if self.config.n_special == num_special_tokens:
            return
        # Update config
        self.config.n_special = num_special_tokens
        # Build new embeddings and initialize
        old_embed = self.tokens_embed
        self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
        # Initialize all new embeddings (in particular the special tokens)
        self.init_weights(self.tokens_embed)
        # Copy word and positional embeddings from the previous weights
        self.tokens_embed.weight.data[: self.config.vocab_size, :] = old_embed.weight.data[: self.config.vocab_size, :]
        self.tokens_embed.weight.data[-self.config.n_positions :, :] = old_embed.weight.data[-self.config.n_positions :, :]

    def forward(self, input_ids, position_ids=None, token_type_ids=None):
        if position_ids is None:
            # This was used when we had a single embedding matrix for position and token embeddings
            # start = self.config.vocab_size + self.config.n_special
            # end = start + input_ids.size(-1)
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.tokens_embed(input_ids)
        position_embeds = self.positions_embed(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.tokens_embed(token_type_ids)
        else:
            token_type_embeds = 0
        # Add the position information to the input embeddings
        # h = e.sum(dim=2)
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        for block in self.h:
            hidden_states = block(hidden_states)
        return hidden_states.view(*input_shape, hidden_states.size(-1))

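# Note (added comment, not part of the original file): OpenAIGPTModel.forward above returns hidden
# states with the same leading dimensions as input_ids plus a trailing hidden dimension, e.g.
# input_ids of shape [batch_size, num_choices, sequence_length] gives hidden states of shape
# [batch_size, num_choices, sequence_length, n_embd].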

class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:

        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: an OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]

    Outputs:
        if `lm_labels` is not `None`:
            Outputs the language modeling loss.
        else:
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
                (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTLMHeadModel(config)
    lm_logits = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTLMHeadModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens):
        " Update input and output embeddings with new embedding matrix "
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        lm_logits = self.lm_head(hidden_states)
        if lm_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1))
            return loss
        return lm_logits

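# Illustrative usage sketch (added comment, not part of the original file): when lm_labels is passed
# to OpenAIGPTLMHeadModel.forward above, the cross-entropy loss is returned instead of the logits and
# positions labelled -1 are ignored; the pre-shifted labels below are placeholders.
#
#     input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
#     lm_labels = torch.LongTensor([[51, 99, -1], [5, 0, -1]])
#     loss = model(input_ids, lm_labels=lm_labels)   # model: an OpenAIGPTLMHeadModel instance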

class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling and a Multiple Choice heads ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:

        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: an OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., total_tokens_embeddings]
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices].

    Outputs:
        if `lm_labels` and `multiple_choice_labels` are not `None`:
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
        else: a tuple with
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    mc_token_mask = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
    lm_logits, multiple_choice_logits = model(input_ids, mc_token_mask)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens):
        " Update input and output embeddings with new embedding matrix "
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)

    def forward(self, input_ids, mc_token_mask, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_mask)
        losses = []
        if lm_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            losses.append(loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1)))
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
        if losses:
            return losses
        return lm_logits, mc_logits
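
# Illustrative usage sketch (added comment, not part of the original file): fine-tuning shapes for
# OpenAIGPTDoubleHeadsModel, following the docstring above; the token ids are placeholders and the
# extra num_choices dimension is what the multiple choice head expects.
#
#     config = OpenAIGPTConfig()
#     model = OpenAIGPTDoubleHeadsModel(config)
#     input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])     # [batch=1, num_choices=2, seq=3]
#     mc_token_mask = torch.LongTensor([[[0, 0, 1], [0, 1, 0]]])     # marks the classification token
#     lm_logits, mc_logits = model(input_ids, mc_token_mask)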