# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ELECTRA model. """

import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss, MSELoss

from ...activations import ACT2FN, get_activation
from ...file_utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from ...modeling_outputs import (
    BaseModelOutputWithCrossAttentions,
    BaseModelOutputWithPastAndCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import (
    PreTrainedModel,
    SequenceSummary,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from ...utils import logging
from .configuration_electra import ElectraConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "ElectraConfig"
_TOKENIZER_FOR_DOC = "ElectraTokenizer"

ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/electra-small-generator",
    "google/electra-base-generator",
    "google/electra-large-generator",
    "google/electra-small-discriminator",
    "google/electra-base-discriminator",
    "google/electra-large-discriminator",
    # See all ELECTRA models at https://huggingface.co/models?filter=electra
]


def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        original_name: str = name

        try:
            if isinstance(model, ElectraForMaskedLM):
                name = name.replace("electra/embeddings/", "generator/embeddings/")

            if discriminator_or_generator == "generator":
                name = name.replace("electra/", "discriminator/")
                name = name.replace("generator/", "electra/")

            name = name.replace("dense_1", "dense_prediction")
            name = name.replace("generator_predictions/output_bias", "generator_lm_head/bias")

            name = name.split("/")
            # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
            # which are not required for using the pretrained model
            if any(n in ["global_step", "temperature"] for n in name):
                logger.info("Skipping {}".format(original_name))
                continue
            pointer = model
            for m_name in name:
                if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                    scope_names = re.split(r"_(\d+)", m_name)
                else:
                    scope_names = [m_name]
                if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                    pointer = getattr(pointer, "weight")
                elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                    pointer = getattr(pointer, "bias")
                elif scope_names[0] == "output_weights":
                    pointer = getattr(pointer, "weight")
                elif scope_names[0] == "squad":
                    pointer = getattr(pointer, "classifier")
                else:
                    pointer = getattr(pointer, scope_names[0])
                if len(scope_names) >= 2:
                    num = int(scope_names[1])
                    pointer = pointer[num]
            if m_name.endswith("_embeddings"):
                pointer = getattr(pointer, "weight")
            elif m_name == "kernel":
                array = np.transpose(array)
            try:
                assert (
                    pointer.shape == array.shape
                ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            print("Initialize PyTorch weight {}".format(name), original_name)
            pointer.data = torch.from_numpy(array)
        except AttributeError as e:
            print("Skipping {}".format(original_name), name, e)
            continue
    return model
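

# Usage sketch (hedged): converting a TF ELECTRA checkpoint into this PyTorch model.
# The checkpoint path below is hypothetical; pass discriminator_or_generator="generator"
# when loading generator weights into ElectraForMaskedLM.
#
#   config = ElectraConfig.from_pretrained("google/electra-small-discriminator")
#   model = ElectraForPreTraining(config)
#   model = load_tf_weights_in_electra(model, config, "/path/to/electra/model.ckpt")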


class ElectraEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

    # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
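            # When past key/values are cached (decoder-style use), offset positions by the cached length so new tokens get the correct position ids.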
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Electra
class ElectraSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
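        # (batch_size, seq_len, all_head_size) -> (batch_size, num_heads, seq_len, attention_head_size)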
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k, v from the cross-attention cache
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
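            # distance lies in [-(seq_len - 1), seq_len - 1]; shifting by max_position_embeddings - 1 maps it onto valid embedding indices [0, 2 * max_position_embeddings - 2].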
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

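        # Scale by 1/sqrt(head_size), as in scaled dot-product attention (Vaswani et al., 2017).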
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in ElectraModel's forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class ElectraSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Electra
class ElectraAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = ElectraSelfAttention(config)
        self.output = ElectraSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class ElectraIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput
class ElectraOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Electra
class ElectraLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ElectraAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = ElectraAttention(config)
        self.intermediate = ElectraIntermediate(config)
        self.output = ElectraOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Electra
class ElectraEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ElectraLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if getattr(self.config, "gradient_checkpointing", False):
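                # Checkpointing trades compute for memory by re-running each layer's forward pass during backward; it is not meant to be combined with use_cache.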

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
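            # For decoder layers, the present key/value cache is the last element of layer_outputs.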
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class ElectraDiscriminatorPredictions(nn.Module):
    """Prediction module for the discriminator, made up of two dense layers."""

    def __init__(self, config):
        super().__init__()

        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dense_prediction = nn.Linear(config.hidden_size, 1)
        self.config = config

    def forward(self, discriminator_hidden_states):
        hidden_states = self.dense(discriminator_hidden_states)
        hidden_states = get_activation(self.config.hidden_act)(hidden_states)
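        # One logit per token: (batch_size, seq_len, 1) is squeezed to (batch_size, seq_len).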
        logits = self.dense_prediction(hidden_states).squeeze(-1)

        return logits


class ElectraGeneratorPredictions(nn.Module):
    """Prediction module for the generator, made up of two dense layers."""

    def __init__(self, config):
        super().__init__()

        self.LayerNorm = nn.LayerNorm(config.embedding_size)
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)

    def forward(self, generator_hidden_states):
        hidden_states = self.dense(generator_hidden_states)
        hidden_states = get_activation("gelu")(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)

        return hidden_states


class ElectraPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ElectraConfig
    load_tf_weights = load_tf_weights_in_electra
    base_model_prefix = "electra"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    _keys_to_ignore_on_load_unexpected = [r"electra\.embeddings_project\.weight", r"electra\.embeddings_project\.bias"]

    # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


@dataclass
class ElectraForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.ElectraForPreTraining`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Total loss of the ELECTRA objective.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
            Prediction scores of the head (scores for each token before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


ELECTRA_START_DOCSTRING = r"""

    This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
    pruning heads etc.)

    This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
    general usage and behavior.

    Parameters:
        config (:class:`~transformers.ElectraConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
            weights.
"""

ELECTRA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`~transformers.ElectraTokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            `What are attention masks? <../glossary.html#attention-mask>`__
        token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
            1]``:

            - 0 corresponds to a `sentence A` token,
            - 1 corresponds to a `sentence B` token.

            `What are token type IDs? <../glossary.html#token-type-ids>`_
        position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
            config.max_position_embeddings - 1]``.

            `What are position IDs? <../glossary.html#position-ids>`_
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
            vectors than the model's internal embedding lookup matrix.
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
            tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
            more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
    "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
    "hidden size and embedding size are different."
    ""
    "Both the generator and discriminator checkpoints may be loaded into this model.",
    ELECTRA_START_DOCSTRING,
)
class ElectraModel(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = ElectraEmbeddings(config)

        if config.embedding_size != config.hidden_size:
            self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)

        self.encoder = ElectraEncoder(config)
        self.config = config
        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/electra-small-discriminator",
        output_type=BaseModelOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
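        # The extended mask broadcasts to (batch_size, num_heads, seq_len, seq_len): 0.0 where attention is allowed, a large negative value where it is masked.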
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        hidden_states = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )

        if hasattr(self, "embeddings_project"):
            hidden_states = self.embeddings_project(hidden_states)

        hidden_states = self.encoder(
            hidden_states,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return hidden_states


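# Usage sketch (hedged; assumes the public google/electra-small-discriminator weights):
#
#   from transformers import ElectraTokenizer
#   tokenizer = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")
#   model = ElectraModel.from_pretrained("google/electra-small-discriminator")
#   inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#   outputs = model(**inputs)  # outputs.last_hidden_state: (1, seq_len, hidden_size)
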
class ElectraClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = get_activation("gelu")(x)  # although BERT uses tanh here, it seems Electra authors used gelu here
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


@add_start_docstrings(
    """
    ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    ELECTRA_START_DOCSTRING,
)
class ElectraForSequenceClassification(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.electra = ElectraModel(config)
        self.classifier = ElectraClassificationHead(config)

        self.init_weights()

    @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/electra-small-discriminator",
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            if :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        discriminator_hidden_states = self.electra(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            inputs_embeds,
            output_attentions,
            output_hidden_states,
            return_dict,
        )

        sequence_output = discriminator_hidden_states[0]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + discriminator_hidden_states[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=discriminator_hidden_states.hidden_states,
            attentions=discriminator_hidden_states.attentions,
        )


@add_start_docstrings(
    """
    Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.

    It is recommended to load the discriminator checkpoint into that model.
    """,
    ELECTRA_START_DOCSTRING,
)
class ElectraForPreTraining(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.electra = ElectraModel(config)
        self.discriminator_predictions = ElectraDiscriminatorPredictions(config)
        self.init_weights()

    @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=ElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the ELECTRA loss. Input should be a sequence of tokens (see :obj:`input_ids`
            docstring). Indices should be in ``[0, 1]``:

            - 0 indicates the token is an original token,
            - 1 indicates the token was replaced.

        Returns:

        Examples::

            >>> from transformers import ElectraTokenizer, ElectraForPreTraining
            >>> import torch

            >>> tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
            >>> model = ElectraForPreTraining.from_pretrained('google/electra-small-discriminator')

            >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            >>> logits = model(input_ids).logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        discriminator_hidden_states = self.electra(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            inputs_embeds,
            output_attentions,
            output_hidden_states,
            return_dict,
        )
        discriminator_sequence_output = discriminator_hidden_states[0]

        logits = self.discriminator_predictions(discriminator_sequence_output)

        loss = None
        if labels is not None:
            loss_fct = nn.BCEWithLogitsLoss()
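            # Compute the loss only over real tokens, i.e. positions where attention_mask == 1.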
            if attention_mask is not None:
                active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
                active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
                active_labels = labels[active_loss]
                loss = loss_fct(active_logits, active_labels.float())
            else:
                loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())

        if not return_dict:
            output = (logits,) + discriminator_hidden_states[1:]
            return ((loss,) + output) if loss is not None else output

        return ElectraForPreTrainingOutput(
            loss=loss,
            logits=logits,
            hidden_states=discriminator_hidden_states.hidden_states,
            attentions=discriminator_hidden_states.attentions,
        )


@add_start_docstrings(
    """
    Electra model with a language modeling head on top.

    Even though both the discriminator and generator may be loaded into this model, the generator is the only model of
    the two to have been trained for the masked language modeling task.
    """,
    ELECTRA_START_DOCSTRING,
)
class ElectraForMaskedLM(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.electra = ElectraModel(config)
        self.generator_predictions = ElectraGeneratorPredictions(config)

        self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
        self.init_weights()

    def get_output_embeddings(self):
        return self.generator_lm_head

    def set_output_embeddings(self, word_embeddings):
        self.generator_lm_head = word_embeddings

    @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/electra-small-discriminator",
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
Lysandre Debut's avatar
Lysandre Debut committed
1099
1100
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        generator_hidden_states = self.electra(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            inputs_embeds,
            output_attentions,
            output_hidden_states,
            return_dict,
        )
        generator_sequence_output = generator_hidden_states[0]

        prediction_scores = self.generator_predictions(generator_sequence_output)
        prediction_scores = self.generator_lm_head(prediction_scores)
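        # generator_predictions maps hidden states from hidden_size to embedding_size;
        # generator_lm_head then projects them to vocab_size logits (see __init__ above).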

        loss = None
        # Masked language modeling softmax layer
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()  # -100 index = padding token
            loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + generator_hidden_states[1:]
            return ((loss,) + output) if loss is not None else output

        return MaskedLMOutput(
            loss=loss,
            logits=prediction_scores,
            hidden_states=generator_hidden_states.hidden_states,
            attentions=generator_hidden_states.attentions,
        )
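
# Usage sketch (illustrative only, not part of the modeling code): masked-token
# prediction with the generator checkpoint, the one actually trained for MLM
# (see the class docstring above).
#
#     from transformers import ElectraTokenizer
#     tokenizer = ElectraTokenizer.from_pretrained("google/electra-small-generator")
#     model = ElectraForMaskedLM.from_pretrained("google/electra-small-generator")
#     inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
#     logits = model(**inputs).logits  # (batch_size, sequence_length, vocab_size)
#     mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero()[0]
#     print(tokenizer.decode([int(logits[0, mask_pos].argmax(-1))]))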


@add_start_docstrings(
    """
    Electra model with a token classification head on top.

    Both the discriminator and generator may be loaded into this model.
    """,
    ELECTRA_START_DOCSTRING,
)
class ElectraForTokenClassification(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.electra = ElectraModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/electra-small-discriminator",
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        discriminator_hidden_states = self.electra(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            inputs_embeds,
            output_attentions,
            output_hidden_states,
            return_dict,
        )
        discriminator_sequence_output = discriminator_hidden_states[0]

        discriminator_sequence_output = self.dropout(discriminator_sequence_output)
        logits = self.classifier(discriminator_sequence_output)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.config.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + discriminator_hidden_states[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=discriminator_hidden_states.hidden_states,
            attentions=discriminator_hidden_states.attentions,
        )
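
# Usage sketch (illustrative only, not part of the modeling code): per-token
# label scores, e.g. for NER after fine-tuning; `num_labels` comes from the config.
#
#     from transformers import ElectraTokenizer
#     tokenizer = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")
#     model = ElectraForTokenClassification.from_pretrained("google/electra-small-discriminator")
#     inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
#     logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)
#     predicted_label_ids = logits.argmax(-1)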


@add_start_docstrings(
    """
    ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    ELECTRA_START_DOCSTRING,
)
class ElectraForQuestionAnswering(ElectraPreTrainedModel):
    config_class = ElectraConfig
    base_model_prefix = "electra"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.electra = ElectraModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/electra-small-discriminator",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        discriminator_hidden_states = self.electra(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        sequence_output = discriminator_hidden_states[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
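        # start_logits / end_logits each have shape (batch_size, sequence_length):
        # one score per token for being the start (respectively the end) of the span.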

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the split may add an extra dimension; squeeze it
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
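            # positions clamped to ignored_index (one past the last valid position)
            # are skipped by the loss below via ignore_index=ignored_index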

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (
                start_logits,
                end_logits,
            ) + discriminator_hidden_states[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=discriminator_hidden_states.hidden_states,
            attentions=discriminator_hidden_states.attentions,
        )
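
# Usage sketch (illustrative only, not part of the modeling code): greedy span
# decoding after fine-tuning on SQuAD-style data. Real pipelines additionally
# constrain start <= end and limit the span length.
#
#     from transformers import ElectraTokenizer
#     tokenizer = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")
#     model = ElectraForQuestionAnswering.from_pretrained("google/electra-small-discriminator")
#     inputs = tokenizer("Who wrote Hamlet?", "Shakespeare wrote Hamlet.", return_tensors="pt")
#     outputs = model(**inputs)
#     start = int(outputs.start_logits.argmax(-1))
#     end = int(outputs.end_logits.argmax(-1))
#     print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))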


@add_start_docstrings(
    """
    ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    ELECTRA_START_DOCSTRING,
)
class ElectraForMultipleChoice(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.electra = ElectraModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.init_weights()

    @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/electra-small-discriminator",
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
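
        # The views above flatten (batch_size, num_choices, ...) inputs into
        # (batch_size * num_choices, ...) so each choice is encoded as an independent
        # sequence; logits are reshaped back into per-choice scores below.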

        discriminator_hidden_states = self.electra(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = discriminator_hidden_states[0]

        pooled_output = self.sequence_summary(sequence_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + discriminator_hidden_states[1:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=discriminator_hidden_states.hidden_states,
            attentions=discriminator_hidden_states.attentions,
        )
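
# Usage sketch (illustrative only, not part of the modeling code): scoring two
# candidate continuations. Each (prompt, choice) pair is tokenized as a pair,
# then the batch is reshaped to add the num_choices dimension.
#
#     from transformers import ElectraTokenizer
#     tokenizer = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")
#     model = ElectraForMultipleChoice.from_pretrained("google/electra-small-discriminator")
#     prompt = "The ball rolled off the table"
#     choices = ["and fell to the floor.", "and started to sing."]
#     enc = tokenizer([prompt, prompt], choices, return_tensors="pt", padding=True)
#     inputs = {k: v.unsqueeze(0) for k, v in enc.items()}  # (1, num_choices, seq_len)
#     best_choice = int(model(**inputs).logits.argmax(-1))  # index of the higher-scoring choice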