# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import copy
import json
import logging
import math
import os
import sys
from io import open

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from .file_utils import cached_path
from .model_utils import WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel, prune_linear_layer

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}


def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using a pretrained model
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'squad':
                pointer = getattr(pointer, 'classifier')
            else:
                try:
                    pointer = getattr(pointer, l[0])
                except AttributeError:
                    print("Skipping {}".format("/".join(name)))
                    continue
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
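
# Illustrative usage sketch (not part of the original module): converting a TensorFlow BERT
# checkpoint into this PyTorch model. The paths below are placeholders.
#
#     config = BertConfig("/path/to/bert_config.json")
#     model = BertForPreTraining(config)
#     load_tf_weights_in_bert(model, config, "/path/to/bert_model.ckpt")
#     torch.save(model.state_dict(), "/path/to/pytorch_model.bin")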


def gelu(x):
    """Implementation of the gelu activation function.
        For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
        0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
        Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
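
# Illustrative note (not part of the original module): `config.hidden_act` may be either one of
# the string keys above or a callable; the string form is resolved through ACT2FN in
# BertIntermediate and BertPredictionHeadTransform, e.g.:
#
#     x = torch.tensor([-1.0, 0.0, 1.0])
#     assert torch.allclose(ACT2FN["gelu"](x), gelu(x))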


class BertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `BertModel`.
    """
    pretrained_config_archive_map = PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 finetuning_task=None):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The standard deviation of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.
            finetuning_task: Name of the GLUE task on which the model was fine-tuned, if any.
        """
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
            self.finetuning_task = finetuning_task
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")


try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
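
# Illustrative check (not part of the original module): whichever BertLayerNorm is in use
# (the apex fused kernel or the pure-PyTorch fallback above), it normalizes over the last
# dimension with learned affine parameters, so shapes are preserved, e.g.:
#
#     ln = BertLayerNorm(768)
#     out = ln(torch.randn(2, 5, 768))   # out.shape == (2, 5, 768)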

class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
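
# Shape sketch (not part of the original module), assuming `config` is a BertConfig instance:
# for `input_ids` of shape [batch_size, seq_length], the word, position and token-type
# embeddings are summed element-wise and the result is [batch_size, seq_length, hidden_size]:
#
#     emb = BertEmbeddings(config)
#     out = emb(torch.zeros(2, 7, dtype=torch.long))   # out.shape == (2, 7, config.hidden_size)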


class BertSelfAttention(nn.Module):
    def __init__(self, config, output_attentions=False):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = [context_layer, attention_probs] if self.output_attentions else [context_layer]
        return outputs
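
# Shape sketch (not part of the original module), following the forward pass above for a
# hidden-state tensor of shape [batch_size, seq_length, hidden_size]:
#   query/key/value projections:  [batch_size, seq_length, all_head_size]
#   after transpose_for_scores:   [batch_size, num_heads, seq_length, head_size]
#   attention_scores/probs:       [batch_size, num_heads, seq_length, seq_length]
#   context_layer (returned):     [batch_size, seq_length, all_head_size]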


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config, output_attentions=False):
        super(BertAttention, self).__init__()
        self.output_attentions = output_attentions
        self.self = BertSelfAttention(config, output_attentions=output_attentions)
        self.output = BertSelfOutput(config)

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None):
        self_outputs = self.self(input_tensor, attention_mask, head_mask)
        attention_output = self.output(self_outputs[0], input_tensor)
        outputs = [attention_output] + self_outputs[1:]  # add attentions if we output them
        return outputs


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config, output_attentions=False):
        super(BertLayer, self).__init__()
        self.output_attentions = output_attentions
        self.attention = BertAttention(config, output_attentions=output_attentions)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = [layer_output] + attention_outputs[1:]  # add attentions if we output them
        return outputs


class BertEncoder(nn.Module):
    def __init__(self, config, output_attentions=False, output_hidden_states=False):
        super(BertEncoder, self).__init__()
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states
        layer = BertLayer(config, output_attentions=output_attentions)
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, head_mask=None):
        all_hidden_states = []
        all_attentions = []
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states.append(hidden_states)

            layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions.append(layer_outputs[1])

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states.append(hidden_states)

        outputs = [hidden_states]
        if self.output_hidden_states:
            outputs.append(all_hidden_states)
        if self.output_attentions:
            outputs.append(all_attentions)
        return outputs  # outputs, (hidden states), (attentions)


class BertPooler(nn.Module):
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BertPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = BertConfig
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    pretrained_config_archive_map = PRETRAINED_CONFIG_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def __init__(self, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class BertModel(BertPreTrainedModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.

    Outputs: `sequence_output`, `pooled_output` and, optionally, the hidden states and the attentions
        `sequence_output`: the full sequence of hidden-states of the last layer of the model, a
            torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
        `hidden_states`: only returned when `output_hidden_states=True`, a list of torch.FloatTensor
            (one for the embedding output + one for each layer) of size [batch_size, sequence_length, hidden_size],
        `attentions`: only returned when `output_attentions=True`, a list of torch.FloatTensor
            (one for each layer) of size [batch_size, num_heads, sequence_length, sequence_length].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = modeling.BertModel(config=config)
    sequence_output, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, output_attentions=False, output_hidden_states=False):
        super(BertModel, self).__init__(config)
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config, output_attentions=output_attentions,
                                           output_hidden_states=output_hidden_states)
        self.pooler = BertPooler(config)
        self.apply(self.init_weights)

    def prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
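
    # Illustrative sketch (not part of the original module): pruning attention heads 0 and 2
    # of layer 0 and head 5 of layer 11 on an instantiated model (indices are placeholders).
    #
    #     model = BertModel(config)
    #     model.prune_heads({0: [0, 2], 11: [5]})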

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, head_mask=None):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoder_outputs = self.encoder(embedding_output,
                                       extended_attention_mask,
                                       head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = [sequence_output, pooled_output] + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)


class BertForPreTraining(BertPreTrainedModel):
    """BERT model with pre-training heads.
    This module comprises the BERT model followed by the two pre-training heads:
        - the masked language modeling head, and
        - the next sentence classification head.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]
        `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
            with indices selected in [0, 1].
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.

    Outputs:
        if `masked_lm_labels` and `next_sentence_label` are not `None`:
            Outputs the total_loss which is the sum of the masked language modeling loss and the next
            sentence classification loss.
        if `masked_lm_labels` or `next_sentence_label` is `None`:
            Outputs a tuple comprising
            - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
            - the next sentence classification logits of shape [batch_size, 2].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForPreTraining(config)
    masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, output_attentions=False, output_hidden_states=False):
        super(BertForPreTraining, self).__init__(config)
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states

        self.bert = BertModel(config, output_attentions=output_attentions,
                                      output_hidden_states=output_hidden_states)
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        outputs = [prediction_scores, seq_relationship_score] + outputs[2:]  # add hidden states and attention if they are here

        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = [total_loss] + outputs

        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)


class BertForMaskedLM(BertPreTrainedModel):
    """BERT model with the masked language modeling head.
    This module comprises the BERT model followed by the masked language modeling head.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.

    Outputs:
        if `masked_lm_labels` is not `None`:
            Outputs the masked language modeling loss.
        if `masked_lm_labels` is `None`:
            Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForMaskedLM(config)
    masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, output_attentions=False, output_hidden_states=False):
        super(BertForMaskedLM, self).__init__(config)
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states

        self.bert = BertModel(config, output_attentions=output_attentions,
                                      output_hidden_states=output_hidden_states)
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        outputs = [prediction_scores] + outputs[2:]  # Add hidden states and attention if they are here
        if masked_lm_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            outputs = [masked_lm_loss] + outputs

        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)


class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT model with next sentence prediction head.
    This module comprises the BERT model followed by the next sentence classification head.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
            with indices selected in [0, 1].
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.

    Outputs:
        if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
        if `next_sentence_label` is `None`:
            Outputs the next sentence classification logits of shape [batch_size, 2].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForNextSentencePrediction(config)
    seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, output_attentions=False, output_hidden_states=False):
        super(BertForNextSentencePrediction, self).__init__(config)
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states

        self.bert = BertModel(config, output_attentions=output_attentions,
                                      output_hidden_states=output_hidden_states)
        self.cls = BertOnlyNSPHead(config)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        seq_relationship_score = self.cls(pooled_output)

        outputs = [seq_relationship_score] + outputs[2:]  # add hidden states and attention if they are here
        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            outputs = [next_sentence_loss] + outputs

        return outputs  # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)


class BertForSequenceClassification(BertPreTrainedModel):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False
        `num_labels`: the number of classes for the classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary. Each sequence in the batch should begin with the
            special "[CLS]" token (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels - 1].
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels
            (or the MSE regression loss when `num_labels == 1`).
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_labels = 2

    model = BertForSequenceClassification(config, num_labels)
    outputs = model(input_ids, token_type_ids, input_mask)
    logits = outputs[0]
    ```
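
    A minimal additional sketch for computing the training loss, assuming the toy tensors above,
    hypothetical `labels`, and an optional `head_mask` (here zeroing the first head of every layer):
    ```python
    labels = torch.LongTensor([1, 0])
    head_mask = torch.ones(12, 12)  # [num_layers, num_heads]; 1.0 => head is not masked
    head_mask[:, 0] = 0.0           # 0.0 => mask the first head of each layer
    outputs = model(input_ids, token_type_ids, input_mask, labels=labels, head_mask=head_mask)
    loss, logits = outputs[0], outputs[1]
    ```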
    """
    def __init__(self, config, num_labels=2, output_attentions=False, output_hidden_states=False):
        super(BertForSequenceClassification, self).__init__(config)
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states
        self.num_labels = num_labels

        self.bert = BertModel(config, output_attentions=output_attentions, output_hidden_states=output_hidden_states)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        outputs = [logits] + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = [loss] + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)


class BertForMultipleChoice(BertPreTrainedModel):
    """BERT model for multiple choice tasks.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False
        `num_choices`: the number of choices for the multiple choice classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_choices = 2

    model = BertForMultipleChoice(config, num_choices)
    outputs = model(input_ids, token_type_ids, input_mask)
    logits = outputs[0]
    ```
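
    A minimal additional sketch for computing the training loss, assuming the toy tensors above
    and hypothetical `labels` holding the index of the correct choice for each example:
    ```python
    labels = torch.LongTensor([0, 1])
    outputs = model(input_ids, token_type_ids, input_mask, labels=labels)
    loss, reshaped_logits = outputs[0], outputs[1]  # reshaped_logits: [batch_size, num_choices]
    ```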
    """
    def __init__(self, config, num_choices=2, output_attentions=False, output_hidden_states=False):
        super(BertForMultipleChoice, self).__init__(config)
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states
        self.num_choices = num_choices

        self.bert = BertModel(config, output_attentions=output_attentions, output_hidden_states=output_hidden_states)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        outputs = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, self.num_choices)

        outputs = [reshaped_logits] + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            outputs = [loss] + outputs

        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)


class BertForTokenClassification(BertPreTrainedModel):
    """BERT model for token-level classification.
    This module is composed of the BERT model with a linear layer on top of
    the full hidden state of the last layer.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False
        `num_labels`: the number of classes for the classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, ..., num_labels - 1].
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, sequence_length, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_labels = 2

    model = BertForTokenClassification(config, num_labels)
    outputs = model(input_ids, token_type_ids, input_mask)
    logits = outputs[0]
    ```
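
    A minimal additional sketch for computing the training loss, assuming the toy tensors above
    and hypothetical per-token `labels`; passing `input_mask` as `attention_mask` restricts the
    loss to non-padded positions:
    ```python
    labels = torch.LongTensor([[0, 1, 0], [1, 0, 0]])
    outputs = model(input_ids, token_type_ids, input_mask, labels=labels)
    loss, logits = outputs[0], outputs[1]  # logits: [batch_size, sequence_length, num_labels]
    ```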
    """
    def __init__(self, config, num_labels=2, output_attentions=False, output_hidden_states=False):
        super(BertForTokenClassification, self).__init__(config)
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states
        self.num_labels = num_labels

        self.bert = BertModel(config, output_attentions=output_attentions, output_hidden_states=output_hidden_states)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        outputs = [logits] + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = [loss] + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)


class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT model for Question Answering (span extraction).
    This module is composed of the BERT model with a linear layer on top of
    the sequence output that computes start_logits and end_logits.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `output_hidden_states`: If True, also output hidden states computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
            into account for computing the loss.
        `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
            into account for computing the loss.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `start_positions` and `end_positions` are not `None`:
            Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
        if `start_positions` or `end_positions` is `None`:
            Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
            position tokens of shape [batch_size, sequence_length].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
    ```
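
    A minimal additional sketch for computing the training loss, assuming the toy tensors above
    and hypothetical gold spans (indices of the answer's first and last token in each sequence):
    ```python
    start_positions = torch.LongTensor([0, 1])
    end_positions = torch.LongTensor([2, 2])
    outputs = model(input_ids, token_type_ids, input_mask,
                    start_positions=start_positions, end_positions=end_positions)
    total_loss = outputs[0]  # average of the start- and end-position CrossEntropy losses
    ```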
    """
    def __init__(self, config, output_attentions=False, output_hidden_states=False):
        super(BertForQuestionAnswering, self).__init__(config)
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states
        self.bert = BertModel(config, output_attentions=output_attentions, output_hidden_states=output_hidden_states)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
                end_positions=None, head_mask=None):
        outputs = self.bert(input_ids, token_type_ids, attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        outputs = [start_logits, end_logits] + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension; squeeze it
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = [total_loss] + outputs

        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)