"tests/test_tokenization_openai.py" did not exist on "31d387604c67d738740a9ae9350df0a273802966"
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import copy
import json
import logging
import math
import os
import sys
from io import open

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from .file_utils import cached_path
from .model_utils import WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel, prune_linear_layer

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}


def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'squad':
                pointer = getattr(pointer, 'classifier')
            else:
                try:
                    pointer = getattr(pointer, l[0])
                except AttributeError:
                    print("Skipping {}".format("/".join(name)))
                    continue
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
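
# A minimal usage sketch (illustrative; the checkpoint and config paths below are hypothetical).
# BertConfig accepts the path to a JSON config file as its first argument, as defined further down:
#
#   config = BertConfig("bert_config.json")
#   model = BertForPreTraining(config)
#   model = load_tf_weights_in_bert(model, config, "/path/to/bert_model.ckpt")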


def gelu(x):
    """Implementation of the gelu activation function.
        For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
        0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
        Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
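
# Quick numeric sanity sketch (illustrative) for the erf-based gelu above:
#   gelu(torch.tensor([-1.0, 0.0, 1.0]))  ->  approximately tensor([-0.1587, 0.0000, 0.8413])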


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}


class BertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `BertModel`.
    """
    pretrained_config_archive_map = PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=30522,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The standard deviation of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.
        """
        super(BertConfig, self).__init__(**kwargs)
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int) "
                             "or the path to a pretrained model config file (str)")

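# A minimal configuration sketch (illustrative): build a BERT-base-sized config from keyword
# arguments, or load one from a JSON file produced alongside a pretrained checkpoint:
#
#   config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=768,
#                       num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
#   config_from_file = BertConfig("bert_config.json")   # hypothetical path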

try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
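
    # Note: this fallback is a pure-PyTorch stand-in for apex's FusedLayerNorm; it normalizes over
    # the last dimension only. A minimal check (illustrative):
    #
    #   ln = BertLayerNorm(hidden_size=4)
    #   y = ln(torch.randn(2, 3, 4))   # per-position mean ~0 and variance ~1 before scale/shift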

class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BertSelfAttention(nn.Module):
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

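    # transpose_for_scores reshapes [batch_size, seq_length, all_head_size] into
    # [batch_size, num_attention_heads, seq_length, attention_head_size] so that attention
    # can be computed independently for each head.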
    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = [context_layer, attention_probs] if self.output_attentions else [context_layer]
        return outputs


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None):
        self_outputs = self.self(input_tensor, attention_mask, head_mask)
        attention_output = self.output(self_outputs[0], input_tensor)
        outputs = [attention_output] + self_outputs[1:]  # add attentions if we output them
        return outputs


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = [layer_output] + attention_outputs[1:]  # add attentions if we output them
        return outputs


class BertEncoder(nn.Module):
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        layer = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, head_mask=None):
        all_hidden_states = []
        all_attentions = []
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states.append(hidden_states)

            layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions.append(layer_outputs[1])

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states.append(hidden_states)

        outputs = [hidden_states]
        if self.output_hidden_states:
            outputs.append(all_hidden_states)
        if self.output_attentions:
            outputs.append(all_attentions)
        return outputs  # outputs, (hidden states), (attentions)


class BertPooler(nn.Module):
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BertPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = BertConfig
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    pretrained_config_archive_map = PRETRAINED_CONFIG_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class BertModel(BertPreTrainedModel):
    """BERT model ("Bidirectional Encoder Representations from Transformers").

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs: a list of [sequence_output, pooled_output], optionally followed by the hidden states of each
        layer (if `output_hidden_states` is True) and the attention weights of each layer (if
        `output_attentions` is True).
        `sequence_output`: the full sequence of hidden-states of the last encoder layer, a torch.FloatTensor
            of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first token of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = modeling.BertModel(config=config)
    sequence_output, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)

        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)

        self.apply(self.init_weights)

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
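            e.g. (illustrative) {0: [0, 2], 11: [5]} prunes heads 0 and 2 of layer 0 and head 5 of layer 11.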
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, head_mask=None):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
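        # e.g. (illustrative) an attention_mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0] here,
        # so the padded position contributes ~0 probability after the softmax.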

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
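        # e.g. (illustrative) a 1D mask torch.tensor([1., 1., 0., ...]) zeroes out the third head
        # in every layer, while a [num_hidden_layers, num_heads] mask controls heads per layer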
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoder_outputs = self.encoder(embedding_output,
                                       extended_attention_mask,
                                       head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = [sequence_output, pooled_output] + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)


class BertForPreTraining(BertPreTrainedModel):
    """BERT model with pre-training heads.
    This module comprises the BERT model followed by the two pre-training heads:
        - the masked language modeling head, and
        - the next sentence classification head.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked); the loss
            is only computed for the labels set in [0, ..., vocab_size - 1]
        `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
            with indices selected in [0, 1].
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `masked_lm_labels` and `next_sentence_label` are not `None`:
            Outputs the total_loss which is the sum of the masked language modeling loss and the next
            sentence classification loss.
        if `masked_lm_labels` or `next_sentence_label` is `None`:
            Outputs a tuple comprising
            - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
            - the next sentence classification logits of shape [batch_size, 2].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForPreTraining(config)
    masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        outputs = [prediction_scores, seq_relationship_score] + outputs[2:]  # add hidden states and attention if they are here

        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = [total_loss] + outputs

        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)


class BertForMaskedLM(BertPreTrainedModel):
    """BERT model with the masked language modeling head.
    This module comprises the BERT model followed by the masked language modeling head.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked); the loss
            is only computed for the labels set in [0, ..., vocab_size - 1]
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `masked_lm_labels` is not `None`:
            Outputs the masked language modeling loss.
        if `masked_lm_labels` is `None`:
            Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForMaskedLM(config)
    masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        outputs = [prediction_scores] + outputs[2:]  # add hidden states and attention if they are here
        if masked_lm_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            outputs = [masked_lm_loss] + outputs

        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)


class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT model with next sentence prediction head.
    This module comprises the BERT model followed by the next sentence classification head.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
            with indices selected in [0, 1].
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
        if `next_sentence_label` is `None`:
            Outputs the next sentence classification logits of shape [batch_size, 2].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForNextSentencePrediction(config)
    seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        seq_relationship_score = self.cls(pooled_output)

        outputs = [seq_relationship_score] + outputs[2:]  # add hidden states and attention if they are here
        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            outputs = [next_sentence_loss] + outputs

        return outputs  # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)


class BertForSequenceClassification(BertPreTrainedModel):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False
        `num_labels`: the number of classes for the classifier (read from `config.num_labels`). Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels - 1].
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels (or the MSE loss if `num_labels == 1`, i.e. regression).
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    config.num_labels = 2

    model = BertForSequenceClassification(config)
    outputs = model(input_ids, token_type_ids, input_mask)
    logits = outputs[0]
    ```
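
    A minimal training-style sketch continuing the example above (illustrative only; the `labels` values are made up):
    ```python
    # Passing labels makes the loss (CrossEntropy, or MSE when num_labels == 1) the first returned element.
    labels = torch.LongTensor([1, 0])
    outputs = model(input_ids, token_type_ids, input_mask, labels=labels)
    loss, logits = outputs[0], outputs[1]
    ```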
    """
    def __init__(self, config):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        outputs = [logits] + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = [loss] + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)


class BertForMultipleChoice(BertPreTrainedModel):
    """BERT model for multiple choice tasks.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]], [[1, 1, 0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]], [[0, 1, 1], [0, 0, 1]]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForMultipleChoice(config)
    outputs = model(input_ids, token_type_ids, input_mask)
    logits = outputs[0]
    ```
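
    A minimal training-style sketch continuing the example above (illustrative only; the `labels` values are made up):
    ```python
    # labels holds the index of the correct choice for each example in the batch.
    labels = torch.LongTensor([0, 1])
    outputs = model(input_ids, token_type_ids, input_mask, labels=labels)
    loss, logits = outputs[0], outputs[1]
    ```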
    """
    def __init__(self, config):
        super(BertForMultipleChoice, self).__init__(config)

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
        """ Input shapes should be [bsz, num choices, seq length] """
        num_choices = input_ids.shape[1]

        # Flatten the choices dimension into the batch dimension: BERT sees [batch_size * num_choices, seq_length]
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        outputs = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        outputs = [reshaped_logits] + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            outputs = [loss] + outputs

        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)


class BertForTokenClassification(BertPreTrainedModel):
    """BERT model for token-level classification.
    This module is composed of the BERT model with a linear layer on top of
    the full hidden state of the last layer.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False
        `num_labels`: the number of classes for the classifier (read from `config.num_labels`). Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, ..., num_labels - 1].
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, sequence_length, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    config.num_labels = 2

    model = BertForTokenClassification(config)
    outputs = model(input_ids, token_type_ids, input_mask)
    logits = outputs[0]
    ```
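
    A minimal training-style sketch continuing the example above (illustrative only; the per-token `labels` values are made up):
    ```python
    # labels gives one class index per token; padded positions are dropped from the loss via attention_mask.
    labels = torch.LongTensor([[0, 1, 1], [0, 1, 0]])
    outputs = model(input_ids, token_type_ids, input_mask, labels=labels)
    loss, logits = outputs[0], outputs[1]
    ```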
    """
    def __init__(self, config):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        outputs = [logits] + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = [loss] + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)


class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT model for Question Answering (span extraction).
    This module is composed of the BERT model with a linear layer on top of
    the sequence output that computes start_logits and end_logits.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
            into account for computing the loss.
        `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
            into account for computing the loss.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `start_positions` and `end_positions` are not `None`:
            Outputs the total_loss which is the average of the CrossEntropy losses for the start and end token positions.
        if `start_positions` or `end_positions` is `None`:
            Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
            position tokens of shape [batch_size, sequence_length].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
    ```
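
    A minimal training-style sketch continuing the example above (illustrative only; the span positions below are made up):
    ```python
    # Passing both span endpoints makes the averaged start/end CrossEntropy loss the first returned element.
    start_positions = torch.LongTensor([0, 1])
    end_positions = torch.LongTensor([1, 2])
    outputs = model(input_ids, token_type_ids, input_mask,
                    start_positions=start_positions, end_positions=end_positions)
    total_loss = outputs[0]
    ```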
    """
    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
                end_positions=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        outputs = [start_logits, end_logits] + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds an extra dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = [total_loss] + outputs

        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)