"tests/prompt/test_prompt_list.py" did not exist on "e6b5bdcb87f4b9e3cfa533b6aed5a0baed586b6b"
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions related to TensorFlow."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import copy
import json
import math
import six
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

def gelu(x):
    """Gaussian Error Linear Unit (GELU) activation."""
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    # The OpenAI GPT gelu variant was: 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
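

# Illustrative sketch (not part of the original module): compares the exact erf-based
# gelu above with the OpenAI GPT tanh approximation mentioned in the comment. The
# helper name and the input range are made up for the demo.
def _check_gelu_against_tanh_approximation():
    x = torch.linspace(-4.0, 4.0, steps=101)
    exact = gelu(x)
    approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    # The two curves agree to within a few thousandths over this range.
    return torch.max(torch.abs(exact - approx)).item()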


class BertConfig(object):
    """Configuration for `BertModel`."""

    def __init__(self,
                 vocab_size,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=16,
                 initializer_range=0.02):
        """Constructs BertConfig.

        Args:
            vocab_size: Vocabulary size of `input_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = cls(vocab_size=None)
        for (key, value) in six.iteritems(json_object):
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
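

# Illustrative sketch (not part of the original module): round-trips a configuration
# through the dict/JSON helpers above. The helper name and all values are made up.
def _config_roundtrip_example():
    config = BertConfig(vocab_size=32000, hidden_size=512,
                        num_hidden_layers=8, num_attention_heads=8,
                        intermediate_size=2048)
    as_json = config.to_json_string()
    restored = BertConfig.from_dict(json.loads(as_json))
    assert restored.to_dict() == config.to_dict()
    return restored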


class BERTLayerNorm(nn.Module):
    def __init__(self, config, variance_epsilon=1e-12):
        "Construct a layernorm module in the TF style (epsilon inside the square root)."
        super(BERTLayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(config.hidden_size))
        self.beta = nn.Parameter(torch.zeros(config.hidden_size))
        self.variance_epsilon = variance_epsilon

    def forward(self, x):
        # TODO: check that this matches the TF implementation in detail (epsilon and axes).
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.gamma * x + self.beta
    #     tf.contrib.layers.layer_norm(
    #   inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
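

# Illustrative sketch (not part of the original module): with freshly initialized
# parameters (gamma=1, beta=0), BERTLayerNorm should match torch.nn.LayerNorm,
# assuming your torch version provides nn.LayerNorm. Helper name and sizes are made up.
def _compare_layernorm_with_torch():
    config = BertConfig(vocab_size=100, hidden_size=32)
    bert_ln = BERTLayerNorm(config)
    torch_ln = nn.LayerNorm(config.hidden_size, eps=bert_ln.variance_epsilon)
    x = torch.randn(2, 5, config.hidden_size)
    return torch.max(torch.abs(bert_ln(x) - torch_ln(x))).item()  # ~0 up to float error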

class BERTEmbeddings(nn.Module):
    def __init__(self, config):
        super(BERTEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)

        # Position embeddings are (normally) a contiguous range so we could use a slice
        # Since the position embedding table is a learned variable, we create it
        # using a (long) sequence length `max_position_embeddings`. The actual
        # sequence length might be shorter than this, for faster training of
        # tasks that do not have long sequences.
        #
        # So `full_position_embeddings` is effectively an embedding table
        # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
        # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
        # perform a slice.
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # The token_type_embeddings vocabulary is very small; the TF implementation uses one-hot embeddings here as a speedup.
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = BERTLayerNorm(config) # Not snake-cased to stick with TF model variable name
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
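

# Illustrative sketch (not part of the original module): the embedding layer maps
# integer ids of shape [batch_size, seq_length] to float embeddings of shape
# [batch_size, seq_length, hidden_size]. Helper name and toy sizes are made up.
def _embeddings_shape_example():
    config = BertConfig(vocab_size=100, hidden_size=48,
                        max_position_embeddings=16, type_vocab_size=2)
    embeddings = BERTEmbeddings(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 7))
    output = embeddings(input_ids)  # token_type_ids defaults to all zeros
    assert output.shape == (2, 7, config.hidden_size)
    return output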


class BERTSelfAttention(nn.Module):
    def __init__(self, config):
        super(BERTSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x, is_key_tensor=False):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        if is_key_tensor:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        # Scalar dimensions referenced here:
        #   B = batch size (number of sequences)
        #   F = `from_tensor` sequence length
        #   T = `to_tensor` sequence length
        #   N = `num_attention_heads`
        #   H = `size_per_head`
        query_layer = self.query(hidden_states)
        key_layer = self.key(hidden_states)
        value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(query_layer)
        key_layer = self.transpose_for_scores(key_layer, is_key_tensor=True)
        value_layer = self.transpose_for_scores(value_layer)

        # Take the dot product between "query" and "key" to get the raw
        # attention scores.
        # `attention_scores` = [B, N, F, T]
        attention_scores = torch.matmul(query_layer, key_layer)
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # TODO: clean this up (the additive mask could be precomputed).
        # An earlier PyTorch port masked the weights directly, as in the TF method
        # `mask_attn_weights`: w = w * b + -1e9 * (1 - b).
        # The BERT TF implementation builds the mask inside the attention layer:
        #   attention_mask = tf.expand_dims(attention_mask, axis=[1])  # `attention_mask` = [B, 1, F, T]
        #   adder = (1.0 - attention_mask) * -10000.0
        # Since `attention_mask` is 1.0 for positions we want to attend to and 0.0 for
        # masked positions, this creates a tensor which is 0.0 for positions we want to
        # attend to and -10000.0 for masked positions. Because it is added to the raw
        # scores before the softmax, this is effectively the same as removing the masked
        # positions entirely. Here the additive mask is already prepared in
        # BertModel.forward() and is simply added to the scores below.
        attention_scores += attention_mask

        # Normalize the attention scores to probabilities.
        # `attention_probs` = [B, N, F, T]
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        return context_layer
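

# Illustrative sketch (not part of the original module): walks through the shapes used
# in BERTSelfAttention. The additive mask is 0.0 for visible positions and -10000.0
# for padded ones, matching what BertModel.forward prepares. Sizes are made up.
def _self_attention_shape_example():
    config = BertConfig(vocab_size=100, hidden_size=48, num_attention_heads=4)
    attention = BERTSelfAttention(config)
    batch_size, seq_length = 2, 7
    hidden_states = torch.randn(batch_size, seq_length, config.hidden_size)
    # No padding in this toy example, so the additive mask is all zeros.
    attention_mask = torch.zeros(batch_size, 1, 1, seq_length)
    context = attention(hidden_states, attention_mask)  # [B, F, N*H] = [2, 7, 48]
    assert context.shape == (batch_size, seq_length, config.hidden_size)
    return context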


class BERTSelfOutput(nn.Module):
    def __init__(self, config):
        super(BERTSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BERTLayerNorm(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BERTAttention(nn.Module):
    def __init__(self, config):
        super(BERTAttention, self).__init__()
        self.self = BERTSelfAttention(config)
        self.output = BERTSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attention_output = self.self(input_tensor, attention_mask)
        attention_output = self.output(attention_output, input_tensor)
        return attention_output


class BERTIntermediate(nn.Module):
    def __init__(self, config):
        super(BERTIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.intermediate_act_fn = gelu

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BERTOutput(nn.Module):
    def __init__(self, config):
        super(BERTOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BERTLayerNorm(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BERTLayer(nn.Module):
    def __init__(self, config):
        super(BERTLayer, self).__init__()
        self.attention = BERTAttention(config)
        self.intermediate = BERTIntermediate(config)
        self.output = BERTOutput(config)

    def forward(self, hidden_states, attention_mask):
        attention_output = self.attention(hidden_states, attention_mask)
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BERTEncoder(nn.Module):
    def __init__(self, config):
        super(BERTEncoder, self).__init__()
        layer = BERTLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask):
        """
        Args:
            hidden_states: float Tensor of shape [batch_size, seq_length, hidden_size]
        Return:
            float Tensor of shape [batch_size, seq_length, hidden_size]
        """
        all_encoder_layers = []
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask)
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
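

# Illustrative sketch (not part of the original module): the encoder returns one
# hidden-state tensor per layer, each of shape [batch_size, seq_length, hidden_size].
# Helper name and the toy configuration are made up.
def _encoder_layers_example():
    config = BertConfig(vocab_size=100, hidden_size=48, num_hidden_layers=3,
                        num_attention_heads=4, intermediate_size=96)
    encoder = BERTEncoder(config)
    hidden_states = torch.randn(2, 7, config.hidden_size)
    attention_mask = torch.zeros(2, 1, 1, 7)  # no padding: additive mask of zeros
    all_layers = encoder(hidden_states, attention_mask)
    assert len(all_layers) == config.num_hidden_layers
    assert all(layer.shape == hidden_states.shape for layer in all_layers)
    return all_layers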


class BERTPooler(nn.Module):
    def __init__(self, config):
        super(BERTPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        """
        Args:
            hidden_states: float Tensor of shape [batch_size, seq_length, hidden_size]
        Return:
            float Tensor of shape [batch_size, hidden_size]
        """
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token. We assume that this has been pre-trained
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
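

# Illustrative sketch (not part of the original module): the pooler reduces
# [batch_size, seq_length, hidden_size] to [batch_size, hidden_size] by passing the
# first token's hidden state through a dense + tanh layer. Names and sizes are made up.
def _pooler_shape_example():
    config = BertConfig(vocab_size=100, hidden_size=48)
    pooler = BERTPooler(config)
    hidden_states = torch.randn(2, 7, config.hidden_size)
    pooled = pooler(hidden_states)
    assert pooled.shape == (2, config.hidden_size)
    return pooled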


class BertModel(nn.Module):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])

    config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)

    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config: BertConfig):
        """Constructor for BertModel.

        Args:
            config: `BertConfig` instance.

        Raises:
            ValueError: The config is invalid or one of the input tensor shapes
                is invalid.
        """
        super(BertModel, self).__init__()
        self.embeddings = BERTEmbeddings(config)
        self.encoder = BERTEncoder(config)
        self.pooler = BERTPooler(config)

    def forward(self, input_ids, token_type_ids, attention_mask):
        # We create a broadcastable attention mask from the 2D input mask.
        # Sizes are [batch_size, 1, 1, from_seq_length], so it broadcasts against the
        # [batch_size, num_heads, to_seq_length, from_seq_length] attention scores.
        # This is simpler than the triangular masking of causal attention; we only
        # need to prepare the additive broadcast mask here.
        attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Cast to float so the additive mask matches the dtype of the attention scores.
        attention_mask = attention_mask.float()
        # 1.0 (attend) becomes 0.0 and 0.0 (masked) becomes -10000.0; adding this to the
        # logits before the softmax effectively removes the masked positions.
        attention_mask = (1.0 - attention_mask) * -10000.0

        embedding_output = self.embeddings(input_ids, token_type_ids)
        all_encoder_layers = self.encoder(embedding_output, attention_mask)
        sequence_output = all_encoder_layers[-1]
        pooled_output = self.pooler(sequence_output)
        return all_encoder_layers, pooled_output
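

# Illustrative sketch (not part of the original module): shows how the 2D input mask is
# turned into the additive mask the attention layers consume. The example values are
# made up; the last position of the second sequence is padding.
def _extended_attention_mask_example():
    attention_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    extended = attention_mask.unsqueeze(1).unsqueeze(2).float()
    extended = (1.0 - extended) * -10000.0
    # extended[1, 0, 0] is [0., 0., -10000.]: the padded position gets a very large
    # negative logit and therefore ~0 probability after the softmax.
    assert extended.shape == (2, 1, 1, 3)
    return extended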

class BertForSequenceClassification(nn.Module):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])

    config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)

    num_labels = 2

    model = modeling.BertForSequenceClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__()
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)

        def init_weights(m):
            if isinstance(m, nn.Linear) or isinstance(m, nn.Embedding):
                print("Initializing {}".format(m))
                # Slight difference here with the TF version which uses truncated_normal
                # cf https://github.com/pytorch/pytorch/pull/5617
                m.weight.data.normal_(mean=0.0, std=config.initializer_range)
        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, labels=None):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits, labels)
            return loss, logits
        else:
            return logits
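

# Illustrative sketch (not part of the original module): one forward/backward pass with
# BertForSequenceClassification on random data. All sizes, ids and the learning rate are
# made up; a real setup would load pre-trained BertModel weights instead of random init.
def _classification_training_step_example():
    config = BertConfig(vocab_size=100, hidden_size=48, num_hidden_layers=2,
                        num_attention_heads=4, intermediate_size=96,
                        max_position_embeddings=16, type_vocab_size=2)
    model = BertForSequenceClassification(config, num_labels=2)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

    input_ids = torch.randint(0, config.vocab_size, (2, 7))
    token_type_ids = torch.zeros_like(input_ids)
    attention_mask = torch.ones_like(input_ids)
    labels = torch.LongTensor([0, 1])

    loss, logits = model(input_ids, token_type_ids, attention_mask, labels)
    loss.backward()
    optimizer.step()
    return loss.item(), logits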