# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import math
import os
import sys
from io import open

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel,
                             prune_linear_layer, add_start_docstrings)

logger = logging.getLogger(__name__)

BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}


def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model.
71
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
78
79
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'squad':
                pointer = getattr(pointer, 'classifier')
            else:
                try:
                    pointer = getattr(pointer, l[0])
                except AttributeError:
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
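
# A hypothetical conversion sketch (the paths are placeholders): build a config,
# instantiate a model, then load an original TensorFlow checkpoint into it.
#
#     config = BertConfig.from_json_file("/path/to/bert_config.json")
#     model = BertForPreTraining(config)
#     model = load_tf_weights_in_bert(model, config, "/path/to/bert_model.ckpt")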


def gelu(x):
    """Implementation of the gelu activation function.
        For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
        0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
        Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}


class BertConfig(PretrainedConfig):
    r"""
        :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
        `BertModel`.

        Arguments:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.
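
        Example::

            # A minimal sketch: pass the vocabulary size directly (the other
            # hyper-parameters fall back to the defaults defined below).
            config = BertConfig(vocab_size_or_config_json_file=30522)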
    """
    pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=30522,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        super(BertConfig, self).__init__(**kwargs)
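        # ``vocab_size_or_config_json_file`` is overloaded: a string (or, on
        # Python 2, ``unicode``) is treated as a path to a JSON config file,
        # while an int is treated as the vocabulary size itself.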
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")


try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except (ImportError, AttributeError) as e:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    BertLayerNorm = torch.nn.LayerNorm

class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
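        # input_ids: (batch_size, seq_length). Each of the three embedding
        # lookups below yields (batch_size, seq_length, hidden_size); they are
        # summed, layer-normalized and passed through dropout.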
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BertSelfAttention(nn.Module):
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
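        # Reshape (batch, seq_len, all_head_size) to
        # (batch, num_heads, seq_len, attention_head_size) so attention scores
        # can be computed independently for every head.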
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
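        # Scale by sqrt(head_size), as in "Attention Is All You Need", to keep
        # the softmax inputs in a well-conditioned range.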
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
        return outputs


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None):
        self_outputs = self.self(input_tensor, attention_mask, head_mask)
        attention_output = self.output(self_outputs[0], input_tensor)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = (layer_output,) + attention_outputs[1:]  # add attentions if we output them
        return outputs


class BertEncoder(nn.Module):
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, head_mask=None):
        all_hidden_states = ()
        all_attentions = ()
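        # When hidden states are requested, the embedding output is recorded as
        # well, so all_hidden_states ends up holding num_hidden_layers + 1 tensors.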
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions)


class BertPooler(nn.Module):
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size,
                                 config.vocab_size,
                                 bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
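        # Project back to vocabulary-sized logits; the decoder weight is tied to
        # the input word embeddings (see ``tie_weights`` in the models below).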
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    def __init__(self, config):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BertPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def __init__(self, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


BERT_START_DOCSTRING = r"""    The BERT model was proposed in
    `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
    by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
    pre-trained using a combination of a masked language modeling objective and next sentence prediction
    on a large corpus comprising the Toronto Book Corpus and Wikipedia.

    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matters related to general usage and behavior.

    .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
        https://arxiv.org/abs/1810.04805

    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module

    Parameters:
        config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model. 
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

BERT_INPUTS_DOCSTRING = r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:

            (a) For sequence pairs:

                ``tokens:         [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
                
                ``token_type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1``

            (b) For single sequences:

                ``tokens:         [CLS] the dog is hairy . [SEP]``
                
                ``token_type_ids:   0   0   0   0  0     0   0``

            Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on
            the right rather than the left.

            Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Segment token indices to indicate first and second portions of the inputs.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
            (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""

@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.",
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertModel(BertPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input; you're often better off averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)

        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)

        if hasattr(config, "pruned_heads"):
            pruned_heads = config.pruned_heads.copy().items()
            config.pruned_heads = {}
            for layer, heads in pruned_heads:
                if self.encoder.layer[int(layer)].attention.self.num_attention_heads == config.num_attention_heads:
                    self.prune_heads({int(layer): list(map(int, heads))})

        self.apply(self.init_weights)

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self.embeddings.word_embeddings
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.embeddings.word_embeddings = new_embeddings
        return self.embeddings.word_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
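            Example (sketch): ``model.prune_heads({0: [0, 2]})`` prunes heads 0 and 2 of layer 0.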
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
        encoder_outputs = self.encoder(embedding_output,
                                       extended_attention_mask,
                                       head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
    a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForPreTraining(BertPreTrainedModel):
    r"""
        **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``
        **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
            Indices should be in ``[0, 1]``.
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForPreTraining.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores, seq_relationship_scores = outputs[:2]

    """
    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        outputs = (prediction_scores, seq_relationship_score,) + outputs[2:]  # add hidden states and attention if they are here

        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = (total_loss,) + outputs

        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a `language modeling` head on top. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
    r"""
        **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Masked language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForMaskedLM.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]

    """
    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
        if masked_lm_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            outputs = (masked_lm_loss,) + outputs

        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForNextSentencePrediction(BertPreTrainedModel):
    r"""
        **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
            Indices should be in ``[0, 1]``.
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Next sequence prediction (classification) loss.
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        seq_relationship_scores = outputs[0]
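
        # A hedged sketch of a true sentence-pair input (not from the original
        # docs; simplified, without the [CLS]/[SEP] markers used in pre-training):
        ids_a = tokenizer.encode("How old are you?")
        ids_b = tokenizer.encode("I am 25 years old.")
        pair_ids = torch.tensor([ids_a + ids_b])
        token_type_ids = torch.tensor([[0] * len(ids_a) + [1] * len(ids_b)])
        seq_relationship_scores = model(pair_ids, token_type_ids=token_type_ids)[0]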

    """
    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None,
                position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        seq_relationship_score = self.cls(pooled_output)

        outputs = (seq_relationship_score,) + outputs[2:]  # add hidden states and attention if they are here
        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            outputs = (next_sentence_loss,) + outputs

        return outputs  # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForSequenceClassification(BertPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss);
            if ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
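
        # A hedged regression variant (a sketch assuming ``from_pretrained``
        # forwards ``num_labels`` to the config, giving one MSE-trained output):
        model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=1)
        labels = torch.tensor([0.5])  # float target for the MSE loss
        loss, logits = model(input_ids, labels=labels)[:2]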

    """
    def __init__(self, config):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    BERT_START_DOCSTRING)
class BertForMultipleChoice(BertPreTrainedModel):
    r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:

            (a) For sequence pairs:

                ``tokens:         [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
                
                ``token_type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1``

            (b) For single sequences:

                ``tokens:         [CLS] the dog is hairy . [SEP]``
                
                ``token_type_ids:   0   0   0   0  0     0   0``
    
            Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Segment token indices to indicate first and second portions of the inputs.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
            (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]
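
        # A hedged sketch for unequal-length choices (not from the original docs):
        # pad ids with 0 (the BERT [PAD] id) and mask the padding positions.
        encoded = [tokenizer.encode(s) for s in choices]
        max_len = max(len(e) for e in encoded)
        padded = [e + [0] * (max_len - len(e)) for e in encoded]
        mask = [[1] * len(e) + [0] * (max_len - len(e)) for e in encoded]
        input_ids = torch.tensor(padded).unsqueeze(0)
        attention_mask = torch.tensor(mask).unsqueeze(0)
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)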

    """
    def __init__(self, config):
        super(BertForMultipleChoice, self).__init__(config)

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        num_choices = input_ids.shape[1]

        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
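        # Each (batch_size, num_choices, seq_len) input is flattened to
        # (batch_size * num_choices, seq_len) so that BERT scores every
        # choice as an independent sequence.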
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
                            attention_mask=flat_attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            outputs = (loss,) + outputs

        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForTokenClassification(BertPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
            Classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForTokenClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, scores = outputs[:2]
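
        # A hedged follow-up (not from the original docs): per-token label
        # predictions are the argmax over the label dimension of the scores.
        predictions = scores.argmax(dim=-1)  # (batch_size, sequence_length)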

    """
    def __init__(self, config):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
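                # active_loss is a flat boolean mask over (batch * seq_len,)
                # selecting the non-padding positions for the loss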
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), scores, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForQuestionAnswering(BertPreTrainedModel):
    r"""
        **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside the sequence are not taken into account for computing the loss.
        **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside the sequence are not taken into account for computing the loss.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total span extraction loss is the average of the Cross-Entropy losses for the start and end positions.
        **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-start scores (before SoftMax).
        **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-end scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForQuestionAnswering.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss, start_scores, end_scores = outputs[:3]
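
        # A hedged sketch for decoding the predicted span (not from the
        # original docs; assumes the answer lies within ``input_ids``):
        start_idx = start_scores.argmax(dim=-1).item()
        end_idx = end_scores.argmax(dim=-1).item()
        answer_tokens = tokenizer.convert_ids_to_tokens(input_ids[0, start_idx:end_idx + 1].tolist())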

    """
    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
                end_positions=None, position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
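        # qa_outputs maps each position to two values; split(1, dim=-1) then
        # yields start_logits and end_logits of shape (batch_size, sequence_length)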

        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension that we squeeze away
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs

        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)