"src/vscode:/vscode.git/clone" did not exist on "13238ba18c6ba2d648ac9e0df746690fe14b54ba"
modeling_openai.py 37.2 KB
Newer Older
thomwolf's avatar
thomwolf committed
1
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .file_utils import cached_path
from .modeling import BertLayerNorm as LayerNorm

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"


def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
    """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
    """
    import re
    import numpy as np

    print("Loading weights...")
    names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
    shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]

    # This was used when we had a single embedding matrix for positions and tokens
    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
    # del init_params[1]
    init_params = [arr.squeeze() for arr in init_params]

    try:
        assert model.tokens_embed.weight.shape == init_params[1].shape
        assert model.positions_embed.weight.shape == init_params[0].shape
    except AssertionError as e:
        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
        e.args += (model.positions_embed.weight.shape, init_params[0].shape)
        raise

    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
    model.positions_embed.weight.data = torch.from_numpy(init_params[0])
    names.pop(0)
    # Pop position and token embedding arrays
    init_params.pop(0)
    init_params.pop(0)

    for name, array in zip(names, init_params):  # names[1:n_transfer], init_params[1:n_transfer]
        name = name[6:]  # skip "model/"
        assert name[-2:] == ":0"
        name = name[:-2]
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'w':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
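
# A minimal usage sketch for the converter above. The folder path is a placeholder; it is
# assumed to contain the parameters_names.json, params_shapes.json and params_{0..9}.npy
# files that the function reads:
#
#     config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478)
#     model = OpenAIGPTModel(config)
#     model = load_tf_weights_in_openai_gpt(model, "/path/to/openai_tf_checkpoint")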


def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))


def swish(x):
    return x * torch.sigmoid(x)


ACT_FNS = {"relu": nn.functional.relu, "swish": swish, "gelu": gelu}  # all entries are plain tensor -> tensor callables
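
# Example (illustrative): every entry of ACT_FNS can be applied directly to a tensor, e.g.
#
#     x = torch.randn(2, 4)
#     y = ACT_FNS["gelu"](x)   # tanh approximation of the Gaussian Error Linear Unit
#     z = ACT_FNS["swish"](x)  # x * sigmoid(x)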


class OpenAIGPTConfig(object):
    """Configuration class to store the configuration of a `OpenAIGPTModel`.
    """

    def __init__(
        self,
        vocab_size_or_config_json_file=40478,
        n_special=0,
        n_positions=512,
        n_ctx=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
    ):
        """Constructs OpenAIGPTConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `OpenAIGPTModel` or a configuration json file.
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...)
            n_positions: Number of positional embeddings.
            n_ctx: Size of the causal mask (usually same as n_positions).
            n_embd: Dimensionality of the embeddings and hidden states.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            afn: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            resid_pdrop: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attn_pdrop: The dropout ratio for the attention
                probabilities.
            embd_pdrop: The dropout ratio for the embeddings.
            layer_norm_epsilon: epsilon to use in the layer norm layers
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_special = n_special
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.afn = afn
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                " or the path to a pretrained model config file (str)"
            )

    @property
    def total_tokens_embeddings(self):
        return self.vocab_size + self.n_special

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
        config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `OpenAIGPTConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

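# Example (illustrative): a config can be built from keyword arguments or round-tripped
# through a dict / JSON string:
#
#     config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478, n_special=2)
#     config.total_tokens_embeddings   # 40478 + 2 = 40480
#     clone = OpenAIGPTConfig.from_dict(json.loads(config.to_json_string()))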

class Conv1D(nn.Module):
    def __init__(self, nf, rf, nx):
        super(Conv1D, self).__init__()
        self.rf = rf
        self.nf = nf
        if rf == 1:  # faster 1x1 conv
            w = torch.empty(nx, nf)
            nn.init.normal_(w, std=0.02)
            self.weight = Parameter(w)
            self.bias = Parameter(torch.zeros(nf))
        else:  # was used to train LM
            raise NotImplementedError

    def forward(self, x):
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x
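
# Example (illustrative): with rf=1, Conv1D is simply a linear projection of the last
# dimension, so an input of shape (batch, seq, nx) comes out as (batch, seq, nf):
#
#     proj = Conv1D(nf=2304, rf=1, nx=768)
#     out = proj(torch.randn(2, 5, 768))   # -> (2, 5, 2304)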


class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, 1, nx)
        self.c_proj = Conv1D(n_state, 1, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def _attn(self, q, k, v):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        # w = w * self.bias + -1e9 * (1 - self.bias)  # TF implem method: mask_attn_weights
        # XD: self.b may be larger than w, so we need to crop it
        b = self.bias[:, :, : w.size(-2), : w.size(-1)]
        w = w * b + -1e9 * (1 - b)

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        return torch.matmul(w, v)

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, x):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        a = self._attn(query, key, value)
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        return a
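
# Example (illustrative): the attention module keeps the (batch, seq, n_embd) shape; the
# registered `bias` buffer is a lower-triangular causal mask, so position i only attends
# to positions <= i:
#
#     config = OpenAIGPTConfig()
#     attn = Attention(nx=config.n_embd, n_ctx=config.n_ctx, config=config, scale=True)
#     out = attn(torch.randn(2, 5, config.n_embd))   # -> (2, 5, 768)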


class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, 1, nx)
        self.c_proj = Conv1D(nx, 1, n_state)
        self.act = ACT_FNS[config.afn]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)

    def forward(self, x):
        a = self.attn(x)
        n = self.ln_1(x + a)
        m = self.mlp(n)
        h = self.ln_2(n + m)
        return h
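
# Example (illustrative): a Block is attention -> add & layer-norm -> MLP -> add & layer-norm
# (post-layer-norm residual ordering) and preserves the hidden size:
#
#     config = OpenAIGPTConfig()
#     block = Block(config.n_ctx, config, scale=True)
#     h = block(torch.randn(2, 5, config.n_embd))   # -> (2, 5, 768)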


class OpenAIGPTLMHead(nn.Module):
    """ Language Model Head for the transformer """

    def __init__(self, model_embeddings_weights, config):
        super(OpenAIGPTLMHead, self).__init__()
        self.n_embd = config.n_embd
        self.set_embeddings_weights(model_embeddings_weights)

    def set_embeddings_weights(self, model_embeddings_weights):
        embed_shape = model_embeddings_weights.shape
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
        self.decoder.weight = model_embeddings_weights  # Tied weights

    def forward(self, hidden_state):
        # Truncated Language modeling logits (we remove the last token)
        # h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
        lm_logits = self.decoder(hidden_state)
        return lm_logits
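
# Example (illustrative): the LM head ties its decoder weight to the token embedding matrix,
# so it projects hidden states back onto the (possibly extended) vocabulary:
#
#     config = OpenAIGPTConfig()
#     embed = nn.Embedding(config.total_tokens_embeddings, config.n_embd)
#     lm_head = OpenAIGPTLMHead(embed.weight, config)
#     logits = lm_head(torch.randn(2, 5, config.n_embd))   # -> (2, 5, total_tokens_embeddings)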


class OpenAIGPTMultipleChoiceHead(nn.Module):
    """ Classifier Head for the transformer """

    def __init__(self, config):
        super(OpenAIGPTMultipleChoiceHead, self).__init__()
        self.n_embd = config.n_embd
        # self.multiple_choice_token = multiple_choice_token
        self.dropout = nn.Dropout2d(config.resid_pdrop)  # To reproduce the noise_shape parameter of TF implementation
        self.linear = nn.Linear(config.n_embd, 1)

        nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.normal_(self.linear.bias, 0)

    def forward(self, hidden_states, mc_token_ids):
        # Classification logits
        # hidden_state (bsz, num_choices, seq_length, hidden_size)
        # mc_token_ids (bsz, num_choices, 1)
        mc_token_ids = mc_token_ids.unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
        # (bsz, num_choices, 1, hidden_size)
        multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
        # (bsz, num_choices, hidden_size)
        multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
        multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
        # (bsz, num_choices)
        return multiple_choice_logits
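
# Example (illustrative): following the shape comment above, the head gathers the hidden
# state at the position given by mc_token_ids for every choice and maps it to one score:
#
#     head = OpenAIGPTMultipleChoiceHead(OpenAIGPTConfig())
#     hidden_states = torch.randn(2, 4, 7, 768)                  # (bsz, num_choices, seq_length, hidden_size)
#     mc_token_ids = torch.full((2, 4, 1), 6, dtype=torch.long)  # (bsz, num_choices, 1): index of the pooled token
#     mc_logits = head(hidden_states, mc_token_ids)              # -> (2, 4)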


class OpenAIGPTPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    def __init__(self, config, *inputs, **kwargs):
        super(OpenAIGPTPreTrainedModel, self).__init__()
        if not isinstance(config, OpenAIGPTConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                )
            )
        self.config = config

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def set_num_special_tokens(self, num_special_tokens):
        pass

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path, num_special_tokens=None, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
    ):
        """
        Instantiate a OpenAIGPTPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `openai-gpt`
                - a path or url to a pretrained model archive containing:
                    . `openai_gpt_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . a series of NumPy files containing OpenAI TensorFlow trained weights
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
                    archive_file, config_file
                )
            )
            return None
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
            logger.info("loading weights file {}".format(archive_file))
            logger.info("loading configuration file {}".format(config_file))
        else:
            logger.info("loading weights file {} from cache at {}".format(
                archive_file, resolved_archive_file))
            logger.info("loading configuration file {} from cache at {}".format(
                config_file, resolved_config_file))
        # Load config
        config = OpenAIGPTConfig.from_json_file(resolved_config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            state_dict = torch.load(resolved_archive_file, map_location='cpu' if not torch.cuda.is_available() else None)
        if from_tf:
            # Directly load from a TensorFlow checkpoint (stored as NumPy array)
            return load_tf_weights_in_openai_gpt(model, resolved_archive_file)

        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if key.endswith(".g"):
                new_key = key[:-2] + ".weight"
            elif key.endswith(".b"):
                new_key = key[:-2] + ".bias"
            elif key.endswith(".w"):
                new_key = key[:-2] + ".weight"
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, "_metadata", None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=""):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
            )
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + ".")

        start_model = model
        if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
            start_model = model.transformer
        load(start_model, prefix="")

        if len(missing_keys) > 0:
            logger.info(
                "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
            )
        if len(unexpected_keys) > 0:
            logger.info(
                "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
            )
        if len(error_msgs) > 0:
            raise RuntimeError(
                "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
            )

        # Add additional embeddings for special tokens if needed
        # This step also makes sure we are still sharing the output and input embeddings after loading weights
        model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
        return model
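
# A minimal loading sketch. `openai-gpt` resolves through PRETRAINED_MODEL_ARCHIVE_MAP; the
# local folder and cache paths below are placeholders for a directory holding
# pytorch_model.bin and config.json:
#
#     model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt", num_special_tokens=2)
#     model = OpenAIGPTLMHeadModel.from_pretrained("./my_gpt_checkpoint", cache_dir="/tmp/gpt_cache")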


class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: a OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[.
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.

    Outputs:
        `hidden_states`: the encoded-hidden-states at the top of the model
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTModel(config)
    hidden_states = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTModel, self).__init__(config)
        num_tokens = config.vocab_size + config.n_special
        self.tokens_embed = nn.Embedding(num_tokens, config.n_embd)
        self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        block = Block(config.n_ctx, config, scale=True)
        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])

        self.apply(self.init_weights)
        # nn.init.normal_(self.embed.weight, std=0.02)

    def set_num_special_tokens(self, num_special_tokens):
        " Update input embeddings with new embedding matrix if needed "
        if self.config.n_special == num_special_tokens:
            return
        # Update config
        self.config.n_special = num_special_tokens
        # Build new embeddings and initialize all new embeddings (in particular the special tokens)
        old_embed = self.tokens_embed
        self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
        self.tokens_embed.to(old_embed.weight.device)
        self.init_weights(self.tokens_embed)
        # Copy word embeddings from the previous weights
        self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]

    def forward(self, input_ids, position_ids=None, token_type_ids=None):
        if position_ids is None:
            # This was used when we had a single embedding matrix for position and token embeddings
            # start = self.config.vocab_size + self.config.n_special
            # end = start + input_ids.size(-1)
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.tokens_embed(input_ids)
        position_embeds = self.positions_embed(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.tokens_embed(token_type_ids)
        else:
            token_type_embeds = 0
        # Add the position information to the input embeddings
        # h = e.sum(dim=2)
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        for block in self.h:
            hidden_states = block(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)
        return hidden_states.view(*output_shape)
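
# Example (illustrative): growing the vocabulary with special tokens after construction;
# the first config.vocab_size rows of the embedding matrix are kept, the new rows are
# freshly initialized:
#
#     model = OpenAIGPTModel(OpenAIGPTConfig())
#     model.set_num_special_tokens(3)
#     model.tokens_embed.num_embeddings   # config.vocab_size + 3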


class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: a OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[.
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]

    Outputs:
        if `lm_labels` is not `None`:
            Outputs the language modeling loss.
        else:
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
                (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTLMHeadModel(config)
    lm_logits = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTLMHeadModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens):
        """ Update input and output embeddings with new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        lm_logits = self.lm_head(hidden_states)
        if lm_labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[:, :-1].contiguous()
            shift_labels = lm_labels[:, 1:].contiguous()

            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            return loss
        return lm_logits
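
# Example (illustrative): when lm_labels is provided the model returns the shifted LM loss;
# positions labelled -1 are ignored by the CrossEntropyLoss:
#
#     model = OpenAIGPTLMHeadModel(OpenAIGPTConfig())
#     input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
#     lm_labels = torch.LongTensor([[31, 51, 99], [15, 5, -1]])
#     loss = model(input_ids, lm_labels=lm_labels)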


class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: a OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
            indices selected in the range [0, total_tokens_embeddings[
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[.
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., total_tokens_embeddings]
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices].

    Outputs:
        if `lm_labels` and `multiple_choice_labels` are not `None`:
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
        else: a tuple with
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choice, seq length)
    mc_token_ids = torch.LongTensor([[2], [1]]) # (bsz, number of choice)

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
    lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens):
        """ Update input and output embeddings with new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)

    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
        losses = []
        if lm_labels is not None:
            # Shift over the sequence dimension so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            losses.append(loss_fct(shift_logits.view(-1,
                          shift_logits.size(-1)), shift_labels.view(-1)))
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
        if losses:
            return losses
        return lm_logits, mc_logits
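
# Example (illustrative): with both label tensors the double-heads model returns a list
# [lm_loss, mc_loss]; without labels it returns (lm_logits, mc_logits). The mc_token_ids
# shape here follows the (bsz, num_choices, 1) convention used in OpenAIGPTMultipleChoiceHead:
#
#     model = OpenAIGPTDoubleHeadsModel(OpenAIGPTConfig())
#     input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])   # (bsz, num_choices, seq)
#     mc_token_ids = torch.LongTensor([[[2], [2]]])                # (bsz, num_choices, 1)
#     lm_logits, mc_logits = model(input_ids, mc_token_ids)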