# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import math
import os
import sys
from io import open

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel,
                             prune_linear_layer, add_start_docstrings)

logger = logging.getLogger(__name__)

BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}


def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """ Load TF checkpoints into a PyTorch model.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
                     "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'squad':
                pointer = getattr(pointer, 'classifier')
            else:
                try:
                    pointer = getattr(pointer, l[0])
                except AttributeError:
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
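
# A minimal usage sketch (comment only; the config and checkpoint paths below
# are hypothetical):
#
#   config = BertConfig.from_json_file("bert_config.json")
#   model = BertForPreTraining(config)
#   load_tf_weights_in_bert(model, config, "/path/to/bert_model.ckpt")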


def gelu(x):
    """Implementation of the gelu activation function.
        For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
        0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
        Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
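
# A minimal dispatch sketch (comment only) of how a config's string
# `hidden_act` is resolved to a callable:
#
#   act_fn = ACT2FN["gelu"]
#   y = act_fn(torch.randn(2, 4))  # element-wise; output keeps the input shape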


class BertConfig(PretrainedConfig):
    r"""
        :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
        `BertModel`.


        Arguments:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.
    """
    pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=30522,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        super(BertConfig, self).__init__(**kwargs)
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int) "
                             "or the path to a pretrained model config file (str)")

try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except (ImportError, AttributeError):
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
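
# A minimal equivalence sketch (comment only): with matching weight and bias,
# this fallback agrees with torch.nn.functional.layer_norm (eps inside the sqrt):
#
#   x = torch.randn(2, 4, 8)
#   ln = BertLayerNorm(8)
#   ref = torch.nn.functional.layer_norm(x, (8,), ln.weight, ln.bias, ln.variance_epsilon)
#   assert torch.allclose(ln(x), ref, atol=1e-6)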

class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
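
# A minimal shape sketch (comment only), assuming config.hidden_size == 768:
#
#   emb = BertEmbeddings(config)
#   input_ids = torch.zeros(2, 5, dtype=torch.long)  # (batch_size, seq_length)
#   emb(input_ids).shape  # -> torch.Size([2, 5, 768])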


class BertSelfAttention(nn.Module):
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
        return outputs
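
# A minimal shape sketch (comment only), assuming 12 heads of size 64
# (hidden_size=768): transpose_for_scores maps (batch, seq, 768) to
# (batch, 12, seq, 64), attention_scores is (batch, 12, seq, seq), and
# context_layer is reshaped back to (batch, seq, 768):
#
#   attn = BertSelfAttention(config)
#   hidden = torch.randn(2, 5, 768)
#   mask = torch.zeros(2, 1, 1, 5)  # additive mask; 0.0 means "attend"
#   attn(hidden, mask)[0].shape     # -> torch.Size([2, 5, 768])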


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None):
        self_outputs = self.self(input_tensor, attention_mask, head_mask)
        attention_output = self.output(self_outputs[0], input_tensor)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
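
# A minimal usage sketch (comment only): pruning heads 0 and 2 shrinks the
# query/key/value projections in place (e.g. with 12 heads of size 64):
#
#   attention = BertAttention(config)
#   attention.prune_heads([0, 2])
#   attention.self.num_attention_heads  # -> 10
#   attention.self.query.weight.shape   # -> torch.Size([640, 768])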


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = (layer_output,) + attention_outputs[1:]  # add attentions if we output them
        return outputs


class BertEncoder(nn.Module):
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, head_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions)
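
# A minimal output-structure sketch (comment only), assuming
# config.output_hidden_states=True and config.output_attentions=True with
# num_hidden_layers=12: forward returns
# (last_hidden_state, all_hidden_states, all_attentions), where
# all_hidden_states holds 13 tensors (embeddings + one per layer) and
# all_attentions holds 12.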


class BertPooler(nn.Module):
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size,
                                 config.vocab_size,
                                 bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    def __init__(self, config):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BertPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def __init__(self, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


BERT_START_DOCSTRING = r"""    The BERT model was proposed in
    `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
    by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
    pre-trained using a combination of a masked language modeling objective and next sentence prediction
    on a large corpus comprising the Toronto Book Corpus and Wikipedia.

    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matters related to general usage and behavior.

    .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
        https://arxiv.org/abs/1810.04805

    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module

    Parameters:
        config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

BERT_INPUTS_DOCSTRING = r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:

            (a) For sequence pairs:

                ``tokens:         [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
                
                ``token_type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1``

            (b) For single sequences:

                ``tokens:         [CLS] the dog is hairy . [SEP]``
                
                ``token_type_ids:   0   0   0   0  0     0   0``
    
            Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Segment token indices to indicate first and second portions of the inputs.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
            (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""

@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.",
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertModel(BertPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input; you're often better off averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)

        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)

        self.apply(self.init_weights)

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self.embeddings.word_embeddings
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.embeddings.word_embeddings = new_embeddings
        return self.embeddings.word_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
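        # Worked example (comment only): a mask row [1, 1, 0] becomes
        # [0.0, 0.0, -10000.0], so the padded position gets ~zero softmax weight.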

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
        encoder_outputs = self.encoder(embedding_output,
                                       extended_attention_mask,
                                       head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
    a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForPreTraining(BertPreTrainedModel):
    r"""
        **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``
        **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
            Indices should be in ``[0, 1]``.
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForPreTraining.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores, seq_relationship_scores = outputs[:2]

    """
    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        outputs = (prediction_scores, seq_relationship_score,) + outputs[2:]  # add hidden states and attention if they are here

        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = (total_loss,) + outputs

        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a `language modeling` head on top. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
    r"""
        **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Masked language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForMaskedLM.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]

    """
    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
        if masked_lm_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            outputs = (masked_lm_loss,) + outputs

        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForNextSentencePrediction(BertPreTrainedModel):
    r"""
        **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
            Indices should be in ``[0, 1]``.
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Next sequence prediction (classification) loss.
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        seq_relationship_scores = outputs[0]
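        # Sketch (assumed usage, mirroring the forward signature below): passing
        # next_sentence_label prepends the NSP loss to the returned tuple.
        next_sentence_label = torch.tensor([0])  # Batch size 1; 0 = B continues A
        loss, seq_relationship_scores = model(input_ids, next_sentence_label=next_sentence_label)[:2]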
    """
    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None,
                position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        seq_relationship_score = self.cls(pooled_output)
        outputs = (seq_relationship_score,) + outputs[2:]  # add hidden states and attention if they are here
        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            outputs = (next_sentence_loss,) + outputs
        return outputs  # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForSequenceClassification(BertPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
            If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
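        # Sketch (assumed variant, not part of the original example): with
        # config.num_labels == 1 the same head performs regression and the loss
        # becomes mean-squared error, e.g.:
        # model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=1)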
    """
    def __init__(self, config):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]
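        # pooled_output is the first ([CLS]) token's hidden state run through
        # the Linear+Tanh pooler; dropout + a linear head map it to class logits.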

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), logits, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    BERT_START_DOCSTRING)
class BertForMultipleChoice(BertPreTrainedModel):
    r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:

            (a) For sequence pairs:

                ``tokens:         [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
                
                ``token_type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1``

            (b) For single sequences:

                ``tokens:         [CLS] the dog is hairy . [SEP]``
                
                ``token_type_ids:   0   0   0   0  0     0   0``
    
            Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Segment token indices to indicate first and second portions of the inputs.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
            (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]
    """
    def __init__(self, config):
        super(BertForMultipleChoice, self).__init__(config)

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        num_choices = input_ids.shape[1]

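        # Flatten the choices dimension so that every (example, choice) pair is
        # encoded as an independent sequence of shape (batch_size * num_choices, seq_len).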
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
                            attention_mask=flat_attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
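        # Each flattened sequence received a single scalar score; regroup the
        # scores as (batch_size, num_choices) so cross-entropy picks among the choices.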
        reshaped_logits = logits.view(-1, num_choices)
        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            outputs = (loss,) + outputs
        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForTokenClassification(BertPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
            Classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForTokenClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, scores = outputs[:2]
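        # Sketch (assumed post-processing, not part of the original example):
        # per-token label predictions are the argmax over the label dimension.
        predictions = scores.argmax(dim=-1)  # shape (batch_size, sequence_length)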
    """
    def __init__(self, config):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), scores, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForQuestionAnswering(BertPreTrainedModel):
    r"""
        **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
        **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total span extraction loss: the average of the Cross-Entropy losses for the start and end positions.
        **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-start scores (before SoftMax).
        **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-end scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForQuestionAnswering.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss, start_scores, end_scores = outputs[:3]
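        # Sketch (assumed post-processing, not part of the original example): a
        # greedy span from the argmax of each score; real decoding usually also
        # enforces answer_start <= answer_end.
        answer_start, answer_end = start_scores.argmax(dim=-1), end_scores.argmax(dim=-1)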
    """
    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
                end_positions=None, position_ids=None, head_mask=None):
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]

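        # A single Linear layer with two output units scores every token twice;
        # splitting along the last dimension yields per-token start and end logits.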
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the gathered targets carry an extra dimension; squeeze it away
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs
        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)