# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions related to TensorFlow."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import copy
import json
import math
import re
import six
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

def gelu(x):
    """Gaussian Error Linear Unit (tanh approximation).

    The TF BERT reference uses the exact form: cdf = 0.5 * (1.0 + tf.erf(x / tf.sqrt(2.0))).
    """
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))

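
# A minimal sanity-check sketch (not part of the original file): it compares the
# tanh approximation above against the exact erf-based GELU that the docstring
# refers to. The grid of test points below is an illustrative assumption.
def _check_gelu_approximation():
    """Return the max absolute gap between gelu() and the exact erf-based GELU."""
    x = torch.linspace(-4.0, 4.0, steps=101)
    exact = 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))  # exact GELU
    return (exact - gelu(x)).abs().max().item()  # small, on the order of 1e-3 or less
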

class BertConfig(object):
    """Configuration for `BertModel`."""

    def __init__(self,
                 vocab_size,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=16,
                 initializer_range=0.02):
        """Constructs BertConfig.

        Args:
            vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stdev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size=None)
        for (key, value) in six.iteritems(json_object):
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

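
# A minimal usage sketch (not part of the original file): round-trips a config
# through a dict and a JSON string. The hyper-parameter values are illustrative.
def _example_config_roundtrip():
    config = BertConfig(vocab_size=32000, hidden_size=512,
                        num_hidden_layers=8, num_attention_heads=8,
                        intermediate_size=2048)
    restored = BertConfig.from_dict(config.to_dict())
    assert restored.to_json_string() == config.to_json_string()
    return restored
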

class BERTLayerNorm(nn.Module):
    def __init__(self, config, variance_epsilon=1e-12):
        "Construct a layernorm module in the TF style (epsilon inside the square root)."
        super(BERTLayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(config.hidden_size))
        self.beta = nn.Parameter(torch.zeros(config.hidden_size))
        self.variance_epsilon = variance_epsilon

    def forward(self, x):
        # This matches the TF implementation, i.e. tf.contrib.layers.layer_norm with
        # begin_norm_axis=-1, begin_params_axis=-1 (epsilon inside the square root).
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.gamma * x + self.beta

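
# A minimal equivalence sketch (not part of the original file): assuming
# nn.LayerNorm is available (PyTorch >= 0.4), BERTLayerNorm computes the same
# normalization as nn.LayerNorm(hidden_size, eps=1e-12), with gamma/beta
# playing the role of weight/bias.
def _check_layernorm_equivalence(config):
    bert_ln = BERTLayerNorm(config)
    torch_ln = nn.LayerNorm(config.hidden_size, eps=bert_ln.variance_epsilon)
    torch_ln.weight.data.copy_(bert_ln.gamma.data)
    torch_ln.bias.data.copy_(bert_ln.beta.data)
    x = torch.randn(2, 5, config.hidden_size)
    return (bert_ln(x) - torch_ln(x)).abs().max().item()  # should be ~0
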
class BERTEmbeddings(nn.Module):
    def __init__(self, config):
        super(BERTEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)

        # The position embedding table is a learned variable created with a
        # (long) maximum sequence length `max_position_embeddings`. The actual
        # sequence length might be shorter, so we simply look up positions
        # [0, 1, 2, ..., seq_length - 1] in this table (the TF implementation
        # slices a `full_position_embeddings` tensor instead).
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # The token_type_embeddings vocabulary is very small; the TF implementation
        # uses one-hot embeddings as a speedup.
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = BERTLayerNorm(config) # Not snake-cased to stick with TF model variable name
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        # Positions are a contiguous range [0, seq_length), broadcast over the batch.
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
    
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

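
# A minimal shape sketch (not part of the original file): shows the expected
# input/output shapes of BERTEmbeddings. The config values and token ids below
# are illustrative assumptions.
def _example_embeddings_shapes():
    config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                        num_attention_heads=4, intermediate_size=64,
                        max_position_embeddings=16, type_vocab_size=2)
    embeddings = BERTEmbeddings(config)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])   # [batch=2, seq=3]
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 1]])  # segment ids
    out = embeddings(input_ids, token_type_ids)
    assert out.shape == (2, 3, config.hidden_size)              # [batch, seq, hidden]
    return out
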

class BERTSelfAttention(nn.Module):
    def __init__(self, config):
        super(BERTSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x, is_key_tensor=False):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        if is_key_tensor:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        # Scalar dimensions referenced here:
        #   B = batch size (number of sequences)
        #   F = `from_tensor` sequence length
        #   T = `to_tensor` sequence length
        #   N = `num_attention_heads`
        #   H = `size_per_head`
        query_layer = self.query(hidden_states)
        key_layer = self.key(hidden_states)
        value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(query_layer)
        key_layer = self.transpose_for_scores(key_layer, is_key_tensor=True)
        value_layer = self.transpose_for_scores(value_layer)

        # Take the dot product between "query" and "key" to get the raw
        # attention scores.
        # `attention_scores` = [B, N, F, T]
        attention_scores = torch.matmul(query_layer, key_layer)
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # `attention_mask` is expected to be [B, T] with 1.0 for positions to
        # attend to and 0.0 for padding. Broadcast it to [B, 1, 1, T] so it can
        # be added to the [B, N, F, T] attention scores. (The TF implementation
        # precomputes an equivalent mask outside the attention layer.)
        attention_mask = attention_mask.unsqueeze(1).unsqueeze(2).float()
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        adder = (1.0 - attention_mask) * -10000.0
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        attention_scores += adder

        # Normalize the attention scores to probabilities.
        # `attention_probs` = [B, N, F, T]
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        return context_layer

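
# A minimal sketch of the additive mask trick used above (not part of the
# original file): positions with mask 0.0 receive a -10000.0 penalty before the
# softmax, which drives their attention probability to (effectively) zero.
def _example_additive_attention_mask():
    mask = torch.FloatTensor([[1.0, 1.0, 0.0]])  # [B=1, T=3], last token is padding
    scores = torch.zeros(1, 1, 1, 3)             # uniform raw scores, [B, N, F, T]
    scores = scores + (1.0 - mask.unsqueeze(1).unsqueeze(2)) * -10000.0
    probs = nn.Softmax(dim=-1)(scores)
    return probs                                  # approximately [0.5, 0.5, 0.0]
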

class BERTSelfOutput(nn.Module):
    def __init__(self, config):
        super(BERTSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BERTLayerNorm(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BERTAttention(nn.Module):
    def __init__(self, config):
        super(BERTAttention, self).__init__()
        self.self = BERTSelfAttention(config)
        self.output = BERTSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attention_output = self.self(input_tensor, attention_mask)
        attention_output = self.output(attention_output, input_tensor)
        return attention_output


class BERTIntermediate(nn.Module):
    def __init__(self, config):
        super(BERTIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.intermediate_act_fn = gelu

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BERTOutput(nn.Module):
    def __init__(self, config):
        super(BERTOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BERTLayerNorm(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BERTLayer(nn.Module):
    def __init__(self, config):
        super(BERTLayer, self).__init__()
        self.attention = BERTAttention(config)
        self.intermediate = BERTIntermediate(config)
        self.output = BERTOutput(config)

    def forward(self, hidden_states, attention_mask):
        attention_output = self.attention(hidden_states, attention_mask)
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BERTEncoder(nn.Module):
    def __init__(self, config):
        super(BERTEncoder, self).__init__()
        layer = BERTLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask):
        """
        Args:
            hidden_states: float Tensor of shape [batch_size, seq_length, hidden_size]
        Return:
            float Tensor of shape [batch_size, seq_length, hidden_size]
        """
        all_encoder_layers = []
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask)
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers


class BERTPooler(nn.Module):
    def __init__(self, config):
        super(BERTPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        """
        Args:
            hidden_states: float Tensor of shape [batch_size, seq_length, hidden_size]
        Return:
            float Tensor of shape [batch_size, hidden_size]
        """
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token. We assume that this has been pre-trained
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertModel(nn.Module):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").

    Example usage:

    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])

    config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
        num_hidden_layers=8, num_attention_heads=8, intermediate_size=1024)

    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    # `pooled_output` can then be fed to a task-specific classification head.
    ```
    """

    def __init__(self, config: BertConfig):
        """Constructor for BertModel.

        Args:
            config: `BertConfig` instance.

        Raises:
            ValueError: The config is invalid.
        """
        super(BertModel, self).__init__()
        self.embeddings = BERTEmbeddings(config)
        self.encoder = BERTEncoder(config)
        self.pooler = BERTPooler(config)

    def forward(self, input_ids, token_type_ids, attention_mask):
        embedding_output = self.embeddings(input_ids, token_type_ids)
        all_encoder_layers = self.encoder(embedding_output, attention_mask)
        sequence_output = all_encoder_layers[-1]
        pooled_output = self.pooler(sequence_output)
        return all_encoder_layers, pooled_output

class BertForSequenceClassification(nn.Module):
    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__()
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)

        def init_weights(m):
            if isinstance(m, (nn.Linear, nn.Embedding)):
                print("Initializing {}".format(m))
                # Slight difference with the TF version, which uses truncated_normal
                # cf https://github.com/pytorch/pytorch/pull/5617
                m.weight.data.normal_(mean=0.0, std=config.initializer_range)
        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, labels=None):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits, labels)
            return loss, logits
        else:
            return logits
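

# A minimal end-to-end sketch (not part of the original file): runs a tiny,
# randomly initialized BertForSequenceClassification on dummy ids and computes
# the classification loss. All sizes and ids below are illustrative assumptions.
def _example_sequence_classification():
    config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                        num_attention_heads=4, intermediate_size=64,
                        max_position_embeddings=16, type_vocab_size=2)
    model = BertForSequenceClassification(config, num_labels=2)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 0, 0]])
    attention_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])  # 0 marks padding
    labels = torch.LongTensor([1, 0])
    loss, logits = model(input_ids, token_type_ids, attention_mask, labels)
    return loss, logits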