Unverified Commit 0735def8 authored by Patrick von Platen, committed by GitHub

[EncoderDecoder] Add encoder-decoder for roberta/ vanilla longformer (#6411)

* add encoder-decoder for roberta

* fix headmask

* apply Sylvain's suggestions

* fix typo

* Apply suggestions from code review
parent fd3de200
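
This PR wires RoBERTa into the encoder-decoder framework (the new RobertaForCausalLM plus its MODEL_FOR_CAUSAL_LM_MAPPING entry below). A minimal usage sketch, not part of the diff, with "roberta-base" and the example sentence only as placeholders:

from transformers import EncoderDecoderModel, RobertaTokenizer

tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
# the decoder checkpoint is expected to be loaded as the new RobertaForCausalLM,
# configured as a decoder with cross-attention layers
model = EncoderDecoderModel.from_encoder_decoder_pretrained("roberta-base", "roberta-base")

inputs = tokenizer("This is a test sentence.", return_tensors="pt")
# quick smoke test: reuse the same ids as encoder input and decoder input
outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=inputs["input_ids"])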
...@@ -63,6 +63,13 @@ RobertaModel ...@@ -63,6 +63,13 @@ RobertaModel
:members: :members:
RobertaForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.RobertaForCausalLM
:members:
RobertaForMaskedLM RobertaForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~
......
...@@ -302,6 +302,7 @@ if is_torch_available(): ...@@ -302,6 +302,7 @@ if is_torch_available():
from .tokenization_marian import MarianTokenizer from .tokenization_marian import MarianTokenizer
from .modeling_roberta import ( from .modeling_roberta import (
RobertaForMaskedLM, RobertaForMaskedLM,
RobertaForCausalLM,
RobertaModel, RobertaModel,
RobertaForSequenceClassification, RobertaForSequenceClassification,
RobertaForMultipleChoice, RobertaForMultipleChoice,
......
...@@ -135,6 +135,7 @@ from .modeling_reformer import ( ...@@ -135,6 +135,7 @@ from .modeling_reformer import (
) )
from .modeling_retribert import RetriBertModel from .modeling_retribert import RetriBertModel
from .modeling_roberta import ( from .modeling_roberta import (
RobertaForCausalLM,
RobertaForMaskedLM, RobertaForMaskedLM,
RobertaForMultipleChoice, RobertaForMultipleChoice,
RobertaForQuestionAnswering, RobertaForQuestionAnswering,
...@@ -250,6 +251,7 @@ MODEL_WITH_LM_HEAD_MAPPING = OrderedDict( ...@@ -250,6 +251,7 @@ MODEL_WITH_LM_HEAD_MAPPING = OrderedDict(
MODEL_FOR_CAUSAL_LM_MAPPING = OrderedDict( MODEL_FOR_CAUSAL_LM_MAPPING = OrderedDict(
[ [
(RobertaConfig, RobertaForCausalLM),
(BertConfig, BertLMHeadModel), (BertConfig, BertLMHeadModel),
(OpenAIGPTConfig, OpenAIGPTLMHeadModel), (OpenAIGPTConfig, OpenAIGPTLMHeadModel),
(GPT2Config, GPT2LMHeadModel), (GPT2Config, GPT2LMHeadModel),
......
...@@ -683,14 +683,6 @@ BERT_INPUTS_DOCSTRING = r""" ...@@ -683,14 +683,6 @@ BERT_INPUTS_DOCSTRING = r"""
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix. than the model's internal embedding lookup matrix.
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`): output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail. If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`): output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):
...@@ -769,6 +761,16 @@ class BertModel(BertPreTrainedModel): ...@@ -769,6 +761,16 @@ class BertModel(BertPreTrainedModel):
output_hidden_states=None, output_hidden_states=None,
return_dict=None, return_dict=None,
): ):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = ( output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
...@@ -956,7 +958,7 @@ class BertLMHeadModel(BertPreTrainedModel): ...@@ -956,7 +958,7 @@ class BertLMHeadModel(BertPreTrainedModel):
super().__init__(config) super().__init__(config)
if not config.is_decoder: if not config.is_decoder:
logger.info("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`") logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
self.bert = BertModel(config) self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config) self.cls = BertOnlyMLMHead(config)
...@@ -976,22 +978,27 @@ class BertLMHeadModel(BertPreTrainedModel): ...@@ -976,22 +978,27 @@ class BertLMHeadModel(BertPreTrainedModel):
position_ids=None, position_ids=None,
head_mask=None, head_mask=None,
inputs_embeds=None, inputs_embeds=None,
labels=None,
encoder_hidden_states=None, encoder_hidden_states=None,
encoder_attention_mask=None, encoder_attention_mask=None,
labels=None,
output_attentions=None, output_attentions=None,
output_hidden_states=None, output_hidden_states=None,
return_dict=None, return_dict=None,
**kwargs
): ):
r""" r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the left-to-right language modeling loss (next word prediction). Labels for computing the left-to-right language modeling loss (next word prediction).
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]`` in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns: Returns:
...@@ -1061,8 +1068,8 @@ class BertForMaskedLM(BertPreTrainedModel): ...@@ -1061,8 +1068,8 @@ class BertForMaskedLM(BertPreTrainedModel):
super().__init__(config) super().__init__(config)
if config.is_decoder: if config.is_decoder:
logger.info( logger.warning(
"If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for " "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention." "bi-directional self-attention."
) )
...@@ -1089,9 +1096,9 @@ class BertForMaskedLM(BertPreTrainedModel): ...@@ -1089,9 +1096,9 @@ class BertForMaskedLM(BertPreTrainedModel):
position_ids=None, position_ids=None,
head_mask=None, head_mask=None,
inputs_embeds=None, inputs_embeds=None,
labels=None,
encoder_hidden_states=None, encoder_hidden_states=None,
encoder_attention_mask=None, encoder_attention_mask=None,
labels=None,
output_attentions=None, output_attentions=None,
output_hidden_states=None, output_hidden_states=None,
return_dict=None, return_dict=None,
......
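
In the reordered signatures above, labels now comes after encoder_hidden_states and encoder_attention_mask, so positional callers would silently misassign arguments; keyword arguments stay safe. A minimal sketch of standalone BertLMHeadModel usage, not part of the diff, assuming the "bert-base-uncased" checkpoint:

from transformers import BertConfig, BertLMHeadModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
config = BertConfig.from_pretrained("bert-base-uncased")
config.is_decoder = True  # otherwise the warning above fires and no causal mask is applied
model = BertLMHeadModel.from_pretrained("bert-base-uncased", config=config, return_dict=True)

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
# pass everything by keyword; labels trigger the shifted next-token loss
outputs = model(**inputs, labels=inputs["input_ids"])
loss, logits = outputs.loss, outputs.logits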
...@@ -191,11 +191,9 @@ class EncoderDecoderModel(PreTrainedModel): ...@@ -191,11 +191,9 @@ class EncoderDecoderModel(PreTrainedModel):
input_ids=None, input_ids=None,
inputs_embeds=None, inputs_embeds=None,
attention_mask=None, attention_mask=None,
head_mask=None,
encoder_outputs=None, encoder_outputs=None,
decoder_input_ids=None, decoder_input_ids=None,
decoder_attention_mask=None, decoder_attention_mask=None,
decoder_head_mask=None,
decoder_inputs_embeds=None, decoder_inputs_embeds=None,
labels=None, labels=None,
**kwargs, **kwargs,
...@@ -216,10 +214,6 @@ class EncoderDecoderModel(PreTrainedModel): ...@@ -216,10 +214,6 @@ class EncoderDecoderModel(PreTrainedModel):
Mask to avoid performing attention on padding token indices for the encoder. Mask to avoid performing attention on padding token indices for the encoder.
Mask values selected in ``[0, 1]``: Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules for the encoder.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`, defaults to :obj:`None`): encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`, defaults to :obj:`None`):
Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`) Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`)
`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder. `last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder.
...@@ -231,10 +225,6 @@ class EncoderDecoderModel(PreTrainedModel): ...@@ -231,10 +225,6 @@ class EncoderDecoderModel(PreTrainedModel):
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`): decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`):
Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. Causal mask will also be used by default. Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. Causal mask will also be used by default.
decoder_head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules for the decoder.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation. Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors
...@@ -279,7 +269,6 @@ class EncoderDecoderModel(PreTrainedModel): ...@@ -279,7 +269,6 @@ class EncoderDecoderModel(PreTrainedModel):
input_ids=input_ids, input_ids=input_ids,
attention_mask=attention_mask, attention_mask=attention_mask,
inputs_embeds=inputs_embeds, inputs_embeds=inputs_embeds,
head_mask=head_mask,
return_dict=False, return_dict=False,
**kwargs_encoder, **kwargs_encoder,
) )
...@@ -293,7 +282,6 @@ class EncoderDecoderModel(PreTrainedModel): ...@@ -293,7 +282,6 @@ class EncoderDecoderModel(PreTrainedModel):
attention_mask=decoder_attention_mask, attention_mask=decoder_attention_mask,
encoder_hidden_states=hidden_states, encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask, encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
labels=labels, labels=labels,
return_dict=False, return_dict=False,
**kwargs_decoder, **kwargs_decoder,
......
...@@ -24,9 +24,15 @@ import torch.nn as nn ...@@ -24,9 +24,15 @@ import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import CrossEntropyLoss, MSELoss
from .configuration_roberta import RobertaConfig from .configuration_roberta import RobertaConfig
from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable from .file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_callable,
replace_return_docstrings,
)
from .modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu from .modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu
from .modeling_outputs import ( from .modeling_outputs import (
CausalLMOutput,
MaskedLMOutput, MaskedLMOutput,
MultipleChoiceModelOutput, MultipleChoiceModelOutput,
QuestionAnsweringModelOutput, QuestionAnsweringModelOutput,
...@@ -175,6 +181,121 @@ class RobertaModel(BertModel): ...@@ -175,6 +181,121 @@ class RobertaModel(BertModel):
self.embeddings.word_embeddings = value self.embeddings.word_embeddings = value
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. """, ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(BertPreTrainedModel):
config_class = RobertaConfig
base_model_prefix = "roberta"
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = RobertaModel(config)
self.lm_head = RobertaLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
@add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@replace_return_docstrings(output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the left-to-right language modeling loss (next word prediction).
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Returns:
Example::
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config, return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutput(
loss=lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING) @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(BertPreTrainedModel): class RobertaForMaskedLM(BertPreTrainedModel):
config_class = RobertaConfig config_class = RobertaConfig
...@@ -183,6 +304,12 @@ class RobertaForMaskedLM(BertPreTrainedModel): ...@@ -183,6 +304,12 @@ class RobertaForMaskedLM(BertPreTrainedModel):
def __init__(self, config): def __init__(self, config):
super().__init__(config) super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config) self.roberta = RobertaModel(config)
self.lm_head = RobertaLMHead(config) self.lm_head = RobertaLMHead(config)
...@@ -206,6 +333,8 @@ class RobertaForMaskedLM(BertPreTrainedModel): ...@@ -206,6 +333,8 @@ class RobertaForMaskedLM(BertPreTrainedModel):
position_ids=None, position_ids=None,
head_mask=None, head_mask=None,
inputs_embeds=None, inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None, labels=None,
output_attentions=None, output_attentions=None,
output_hidden_states=None, output_hidden_states=None,
...@@ -237,6 +366,8 @@ class RobertaForMaskedLM(BertPreTrainedModel): ...@@ -237,6 +366,8 @@ class RobertaForMaskedLM(BertPreTrainedModel):
position_ids=position_ids, position_ids=position_ids,
head_mask=head_mask, head_mask=head_mask,
inputs_embeds=inputs_embeds, inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions, output_attentions=output_attentions,
output_hidden_states=output_hidden_states, output_hidden_states=output_hidden_states,
return_dict=return_dict, return_dict=return_dict,
......
...@@ -862,7 +862,7 @@ class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss): ...@@ -862,7 +862,7 @@ class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss):
super().__init__(config, *inputs, **kwargs) super().__init__(config, *inputs, **kwargs)
if config.is_decoder: if config.is_decoder:
logger.info( logger.warning(
"If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for " "If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention." "bi-directional self-attention."
) )
...@@ -941,7 +941,7 @@ class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss): ...@@ -941,7 +941,7 @@ class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss):
super().__init__(config, *inputs, **kwargs) super().__init__(config, *inputs, **kwargs)
if not config.is_decoder: if not config.is_decoder:
logger.info("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`") logger.warning("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`")
self.bert = TFBertMainLayer(config, name="bert") self.bert = TFBertMainLayer(config, name="bert")
self.mlm = TFBertMLMHead(config, self.bert.embeddings, name="mlm___cls") self.mlm = TFBertMLMHead(config, self.bert.embeddings, name="mlm___cls")
......
...@@ -152,7 +152,7 @@ class BertModelTester: ...@@ -152,7 +152,7 @@ class BertModelTester:
encoder_attention_mask, encoder_attention_mask,
) )
def create_and_check_bert_model( def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
model = BertModel(config=config) model = BertModel(config=config)
...@@ -164,7 +164,7 @@ class BertModelTester: ...@@ -164,7 +164,7 @@ class BertModelTester:
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_bert_model_as_decoder( def create_and_check_model_as_decoder(
self, self,
config, config,
input_ids, input_ids,
...@@ -197,7 +197,7 @@ class BertModelTester: ...@@ -197,7 +197,7 @@ class BertModelTester:
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_bert_for_causal_lm( def create_and_check_for_causal_lm(
self, self,
config, config,
input_ids, input_ids,
...@@ -215,7 +215,7 @@ class BertModelTester: ...@@ -215,7 +215,7 @@ class BertModelTester:
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_bert_for_masked_lm( def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
model = BertForMaskedLM(config=config) model = BertForMaskedLM(config=config)
...@@ -224,7 +224,7 @@ class BertModelTester: ...@@ -224,7 +224,7 @@ class BertModelTester:
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_bert_model_for_causal_lm_as_decoder( def create_and_check_model_for_causal_lm_as_decoder(
self, self,
config, config,
input_ids, input_ids,
...@@ -257,7 +257,7 @@ class BertModelTester: ...@@ -257,7 +257,7 @@ class BertModelTester:
) )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_bert_for_next_sequence_prediction( def create_and_check_for_next_sequence_prediction(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
model = BertForNextSentencePrediction(config=config) model = BertForNextSentencePrediction(config=config)
...@@ -268,7 +268,7 @@ class BertModelTester: ...@@ -268,7 +268,7 @@ class BertModelTester:
) )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_bert_for_pretraining( def create_and_check_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
model = BertForPreTraining(config=config) model = BertForPreTraining(config=config)
...@@ -284,7 +284,7 @@ class BertModelTester: ...@@ -284,7 +284,7 @@ class BertModelTester:
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_bert_for_question_answering( def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
model = BertForQuestionAnswering(config=config) model = BertForQuestionAnswering(config=config)
...@@ -300,7 +300,7 @@ class BertModelTester: ...@@ -300,7 +300,7 @@ class BertModelTester:
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_bert_for_sequence_classification( def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
config.num_labels = self.num_labels config.num_labels = self.num_labels
...@@ -310,7 +310,7 @@ class BertModelTester: ...@@ -310,7 +310,7 @@ class BertModelTester:
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_bert_for_token_classification( def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
config.num_labels = self.num_labels config.num_labels = self.num_labels
...@@ -320,7 +320,7 @@ class BertModelTester: ...@@ -320,7 +320,7 @@ class BertModelTester:
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_bert_for_multiple_choice( def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
config.num_choices = self.num_choices config.num_choices = self.num_choices
...@@ -379,15 +379,15 @@ class BertModelTest(ModelTesterMixin, unittest.TestCase): ...@@ -379,15 +379,15 @@ class BertModelTest(ModelTesterMixin, unittest.TestCase):
def test_config(self): def test_config(self):
self.config_tester.run_common_tests() self.config_tester.run_common_tests()
def test_bert_model(self): def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_model(*config_and_inputs) self.model_tester.create_and_check_model(*config_and_inputs)
def test_bert_model_as_decoder(self): def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_bert_model_as_decoder(*config_and_inputs) self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_bert_model_as_decoder_with_default_input_mask(self): def test_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3 # This regression test was failing with PyTorch < 1.3
( (
config, config,
...@@ -403,7 +403,7 @@ class BertModelTest(ModelTesterMixin, unittest.TestCase): ...@@ -403,7 +403,7 @@ class BertModelTest(ModelTesterMixin, unittest.TestCase):
input_mask = None input_mask = None
self.model_tester.create_and_check_bert_model_as_decoder( self.model_tester.create_and_check_model_as_decoder(
config, config,
input_ids, input_ids,
token_type_ids, token_type_ids,
...@@ -417,39 +417,39 @@ class BertModelTest(ModelTesterMixin, unittest.TestCase): ...@@ -417,39 +417,39 @@ class BertModelTest(ModelTesterMixin, unittest.TestCase):
def test_for_causal_lm(self): def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_bert_for_causal_lm(*config_and_inputs) self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_for_masked_lm(self): def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_masked_lm(*config_and_inputs) self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_causal_lm_decoder(self): def test_for_causal_lm_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_bert_model_for_causal_lm_as_decoder(*config_and_inputs) self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs)
def test_for_multiple_choice(self): def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_multiple_choice(*config_and_inputs) self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_next_sequence_prediction(self): def test_for_next_sequence_prediction(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_next_sequence_prediction(*config_and_inputs) self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)
def test_for_pretraining(self): def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_pretraining(*config_and_inputs) self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_question_answering(self): def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_question_answering(*config_and_inputs) self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self): def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_sequence_classification(*config_and_inputs) self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self): def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_token_classification(*config_and_inputs) self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow @slow
def test_model_from_pretrained(self): def test_model_from_pretrained(self):
......
...@@ -24,60 +24,34 @@ from transformers.testing_utils import require_torch, slow, torch_device ...@@ -24,60 +24,34 @@ from transformers.testing_utils import require_torch, slow, torch_device
# for now only run module with pytest tests/test_modeling_encoder_decoder.py::EncoderDecoderModelTest # for now only run module with pytest tests/test_modeling_encoder_decoder.py::EncoderDecoderModelTest
from .test_modeling_bert import BertModelTester from .test_modeling_bert import BertModelTester
from .test_modeling_common import ids_tensor from .test_modeling_common import ids_tensor
from .test_modeling_roberta import RobertaModelTester
if is_torch_available(): if is_torch_available():
from transformers import BertModel, EncoderDecoderModel, EncoderDecoderConfig from transformers import (
from transformers.modeling_bert import BertLMHeadModel BertModel,
BertLMHeadModel,
RobertaModel,
RobertaForCausalLM,
EncoderDecoderModel,
EncoderDecoderConfig,
)
import numpy as np import numpy as np
import torch import torch
@require_torch @require_torch
class EncoderDecoderModelTest(unittest.TestCase): class EncoderDecoderMixin:
def prepare_config_and_inputs_bert(self): def get_encoder_decoder_model(self, config, decoder_config):
bert_model_tester = BertModelTester(self) pass
encoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = encoder_config_and_inputs
(
decoder_config,
decoder_input_ids,
decoder_token_type_ids,
decoder_input_mask,
decoder_sequence_labels,
decoder_token_labels,
decoder_choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = decoder_config_and_inputs
# make sure that cross attention layers are added def prepare_config_and_inputs(self):
decoder_config.add_cross_attention = True pass
return {
"config": config, def get_pretrained_model(self):
"input_ids": input_ids, pass
"attention_mask": input_mask,
"decoder_config": decoder_config,
"decoder_input_ids": decoder_input_ids,
"decoder_token_type_ids": decoder_token_type_ids,
"decoder_attention_mask": decoder_input_mask,
"decoder_sequence_labels": decoder_sequence_labels,
"decoder_token_labels": decoder_token_labels,
"decoder_choice_labels": decoder_choice_labels,
"encoder_hidden_states": encoder_hidden_states,
"labels": decoder_token_labels,
}
def create_and_check_bert_encoder_decoder_model_from_pretrained_configs( def check_encoder_decoder_model_from_pretrained_configs(
self, self,
config, config,
input_ids, input_ids,
...@@ -107,7 +81,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -107,7 +81,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))) self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)))
self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,))) self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,)))
def create_and_check_bert_encoder_decoder_model( def check_encoder_decoder_model(
self, self,
config, config,
input_ids, input_ids,
...@@ -118,8 +92,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -118,8 +92,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
decoder_attention_mask, decoder_attention_mask,
**kwargs **kwargs
): ):
encoder_model = BertModel(config) encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.is_decoder)
self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention)
...@@ -145,7 +118,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -145,7 +118,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))) self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)))
self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,))) self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,)))
def create_and_check_bert_encoder_decoder_model_from_pretrained( def check_encoder_decoder_model_from_pretrained(
self, self,
config, config,
input_ids, input_ids,
...@@ -156,8 +129,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -156,8 +129,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
decoder_attention_mask, decoder_attention_mask,
**kwargs **kwargs
): ):
encoder_model = BertModel(config) encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
decoder_model = BertLMHeadModel(decoder_config)
kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model}
enc_dec_model = EncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) enc_dec_model = EncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs)
enc_dec_model.to(torch_device) enc_dec_model.to(torch_device)
...@@ -171,7 +143,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -171,7 +143,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))) self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)))
self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,))) self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,)))
def create_and_check_save_and_load( def check_save_and_load(
self, self,
config, config,
input_ids, input_ids,
...@@ -182,8 +154,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -182,8 +154,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
decoder_attention_mask, decoder_attention_mask,
**kwargs **kwargs
): ):
encoder_model = BertModel(config) encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device) enc_dec_model.to(torch_device)
enc_dec_model.eval() enc_dec_model.eval()
...@@ -212,7 +183,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -212,7 +183,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
max_diff = np.amax(np.abs(out_1 - out_2)) max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5) self.assertLessEqual(max_diff, 1e-5)
def create_and_check_save_and_load_encoder_decoder_model( def check_save_and_load_encoder_decoder_model(
self, self,
config, config,
input_ids, input_ids,
...@@ -223,8 +194,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -223,8 +194,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
decoder_attention_mask, decoder_attention_mask,
**kwargs **kwargs
): ):
encoder_model = BertModel(config) encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device) enc_dec_model.to(torch_device)
enc_dec_model.eval() enc_dec_model.eval()
...@@ -257,7 +227,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -257,7 +227,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
max_diff = np.amax(np.abs(out_1 - out_2)) max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5) self.assertLessEqual(max_diff, 1e-5)
def create_and_check_bert_encoder_decoder_model_labels( def check_encoder_decoder_model_labels(
self, self,
config, config,
input_ids, input_ids,
...@@ -269,8 +239,7 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -269,8 +239,7 @@ class EncoderDecoderModelTest(unittest.TestCase):
labels, labels,
**kwargs **kwargs
): ):
encoder_model = BertModel(config) encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device) enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model( outputs_encoder_decoder = enc_dec_model(
...@@ -288,9 +257,8 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -288,9 +257,8 @@ class EncoderDecoderModelTest(unittest.TestCase):
self.assertEqual(outputs_encoder_decoder[1].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))) self.assertEqual(outputs_encoder_decoder[1].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)))
self.assertEqual(outputs_encoder_decoder[2].shape, (input_ids.shape + (config.hidden_size,))) self.assertEqual(outputs_encoder_decoder[2].shape, (input_ids.shape + (config.hidden_size,)))
def create_and_check_bert_encoder_decoder_model_generate(self, input_ids, config, decoder_config, **kwargs): def check_encoder_decoder_model_generate(self, input_ids, config, decoder_config, **kwargs):
encoder_model = BertModel(config) encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device) enc_dec_model.to(torch_device)
...@@ -300,47 +268,37 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -300,47 +268,37 @@ class EncoderDecoderModelTest(unittest.TestCase):
) )
self.assertEqual(generated_output.shape, (input_ids.shape[0],) + (decoder_config.max_length,)) self.assertEqual(generated_output.shape, (input_ids.shape[0],) + (decoder_config.max_length,))
def test_bert_encoder_decoder_model(self): def test_encoder_decoder_model(self):
input_ids_dict = self.prepare_config_and_inputs_bert() input_ids_dict = self.prepare_config_and_inputs()
self.create_and_check_bert_encoder_decoder_model(**input_ids_dict) self.check_encoder_decoder_model(**input_ids_dict)
def test_bert_encoder_decoder_model_from_pretrained_configs(self): def test_encoder_decoder_model_from_pretrained_configs(self):
input_ids_dict = self.prepare_config_and_inputs_bert() input_ids_dict = self.prepare_config_and_inputs()
self.create_and_check_bert_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict)
def test_bert_encoder_decoder_model_from_pretrained(self): def test_encoder_decoder_model_from_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs_bert() input_ids_dict = self.prepare_config_and_inputs()
self.create_and_check_bert_encoder_decoder_model_from_pretrained(**input_ids_dict) self.check_encoder_decoder_model_from_pretrained(**input_ids_dict)
def test_save_and_load_from_pretrained(self): def test_save_and_load_from_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs_bert() input_ids_dict = self.prepare_config_and_inputs()
self.create_and_check_save_and_load(**input_ids_dict) self.check_save_and_load(**input_ids_dict)
def test_save_and_load_from_encoder_decoder_pretrained(self): def test_save_and_load_from_encoder_decoder_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs_bert() input_ids_dict = self.prepare_config_and_inputs()
self.create_and_check_save_and_load_encoder_decoder_model(**input_ids_dict) self.check_save_and_load_encoder_decoder_model(**input_ids_dict)
def test_bert_encoder_decoder_model_labels(self):
input_ids_dict = self.prepare_config_and_inputs_bert()
self.create_and_check_bert_encoder_decoder_model_labels(**input_ids_dict)
def test_bert_encoder_decoder_model_generate(self): def test_encoder_decoder_model_labels(self):
input_ids_dict = self.prepare_config_and_inputs_bert() input_ids_dict = self.prepare_config_and_inputs()
self.create_and_check_bert_encoder_decoder_model_generate(**input_ids_dict) self.check_encoder_decoder_model_labels(**input_ids_dict)
@slow
def test_real_bert_model_from_pretrained(self):
model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
self.assertIsNotNone(model)
@slow def test_encoder_decoder_model_generate(self):
def test_real_bert_model_from_pretrained_add_cross_attention(self): input_ids_dict = self.prepare_config_and_inputs()
model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased") self.check_encoder_decoder_model_generate(**input_ids_dict)
self.assertTrue(hasattr(model.decoder.bert.encoder.layer[0], "crossattention"))
@slow @slow
def test_real_bert_model_save_load_from_pretrained(self): def test_real_model_save_load_from_pretrained(self):
model_2 = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased") model_2 = self.get_pretrained_model()
model_2.to(torch_device) model_2.to(torch_device)
input_ids = ids_tensor([13, 5], model_2.config.encoder.vocab_size) input_ids = ids_tensor([13, 5], model_2.config.encoder.vocab_size)
decoder_input_ids = ids_tensor([13, 1], model_2.config.encoder.vocab_size) decoder_input_ids = ids_tensor([13, 1], model_2.config.encoder.vocab_size)
...@@ -362,3 +320,107 @@ class EncoderDecoderModelTest(unittest.TestCase): ...@@ -362,3 +320,107 @@ class EncoderDecoderModelTest(unittest.TestCase):
out_1[np.isnan(out_1)] = 0 out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2)) max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5) self.assertLessEqual(max_diff, 1e-5)
class BertEncoderDecoderModelTest(EncoderDecoderMixin, unittest.TestCase):
def get_pretrained_model(self):
return EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "bert-base-cased")
def get_encoder_decoder_model(self, config, decoder_config):
encoder_model = BertModel(config)
decoder_model = BertLMHeadModel(decoder_config)
return encoder_model, decoder_model
def prepare_config_and_inputs(self):
model_tester = BertModelTester(self)
encoder_config_and_inputs = model_tester.prepare_config_and_inputs()
decoder_config_and_inputs = model_tester.prepare_config_and_inputs_for_decoder()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = encoder_config_and_inputs
(
decoder_config,
decoder_input_ids,
decoder_token_type_ids,
decoder_input_mask,
decoder_sequence_labels,
decoder_token_labels,
decoder_choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = decoder_config_and_inputs
# make sure that cross attention layers are added
decoder_config.add_cross_attention = True
return {
"config": config,
"input_ids": input_ids,
"attention_mask": input_mask,
"decoder_config": decoder_config,
"decoder_input_ids": decoder_input_ids,
"decoder_token_type_ids": decoder_token_type_ids,
"decoder_attention_mask": decoder_input_mask,
"decoder_sequence_labels": decoder_sequence_labels,
"decoder_token_labels": decoder_token_labels,
"decoder_choice_labels": decoder_choice_labels,
"encoder_hidden_states": encoder_hidden_states,
"labels": decoder_token_labels,
}
class RoBertaEncoderDecoderModelTest(EncoderDecoderMixin, unittest.TestCase):
def get_encoder_decoder_model(self, config, decoder_config):
encoder_model = RobertaModel(config)
decoder_model = RobertaForCausalLM(decoder_config)
return encoder_model, decoder_model
def prepare_config_and_inputs(self):
model_tester = RobertaModelTester(self)
encoder_config_and_inputs = model_tester.prepare_config_and_inputs()
decoder_config_and_inputs = model_tester.prepare_config_and_inputs_for_decoder()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = encoder_config_and_inputs
(
decoder_config,
decoder_input_ids,
decoder_token_type_ids,
decoder_input_mask,
decoder_sequence_labels,
decoder_token_labels,
decoder_choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = decoder_config_and_inputs
# make sure that cross attention layers are added
decoder_config.add_cross_attention = True
return {
"config": config,
"input_ids": input_ids,
"attention_mask": input_mask,
"decoder_config": decoder_config,
"decoder_input_ids": decoder_input_ids,
"decoder_token_type_ids": decoder_token_type_ids,
"decoder_attention_mask": decoder_input_mask,
"decoder_sequence_labels": decoder_sequence_labels,
"decoder_token_labels": decoder_token_labels,
"decoder_choice_labels": decoder_choice_labels,
"encoder_hidden_states": encoder_hidden_states,
"labels": decoder_token_labels,
}
def get_pretrained_model(self):
return EncoderDecoderModel.from_encoder_decoder_pretrained("roberta-base", "roberta-base")
...@@ -20,7 +20,7 @@ from transformers import is_torch_available ...@@ -20,7 +20,7 @@ from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available(): if is_torch_available():
...@@ -28,6 +28,7 @@ if is_torch_available(): ...@@ -28,6 +28,7 @@ if is_torch_available():
from transformers import ( from transformers import (
RobertaConfig, RobertaConfig,
RobertaModel, RobertaModel,
RobertaForCausalLM,
RobertaForMaskedLM, RobertaForMaskedLM,
RobertaForMultipleChoice, RobertaForMultipleChoice,
RobertaForQuestionAnswering, RobertaForQuestionAnswering,
...@@ -101,7 +102,34 @@ class RobertaModelTester: ...@@ -101,7 +102,34 @@ class RobertaModelTester:
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_roberta_model( def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
model = RobertaModel(config=config) model = RobertaModel(config=config)
...@@ -114,7 +142,58 @@ class RobertaModelTester: ...@@ -114,7 +142,58 @@ class RobertaModelTester:
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_roberta_for_masked_lm( def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = RobertaModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = RobertaForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
model = RobertaForMaskedLM(config=config) model = RobertaForMaskedLM(config=config)
...@@ -123,7 +202,7 @@ class RobertaModelTester: ...@@ -123,7 +202,7 @@ class RobertaModelTester:
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_roberta_for_token_classification( def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
config.num_labels = self.num_labels config.num_labels = self.num_labels
...@@ -133,7 +212,7 @@ class RobertaModelTester: ...@@ -133,7 +212,7 @@ class RobertaModelTester:
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_roberta_for_multiple_choice( def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
config.num_choices = self.num_choices config.num_choices = self.num_choices
...@@ -151,7 +230,7 @@ class RobertaModelTester: ...@@ -151,7 +230,7 @@ class RobertaModelTester:
) )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_roberta_for_question_answering( def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
): ):
model = RobertaForQuestionAnswering(config=config) model = RobertaForQuestionAnswering(config=config)
...@@ -187,6 +266,7 @@ class RobertaModelTest(ModelTesterMixin, unittest.TestCase): ...@@ -187,6 +266,7 @@ class RobertaModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = ( all_model_classes = (
( (
RobertaForCausalLM,
RobertaForMaskedLM, RobertaForMaskedLM,
RobertaModel, RobertaModel,
RobertaForSequenceClassification, RobertaForSequenceClassification,
...@@ -205,25 +285,61 @@ class RobertaModelTest(ModelTesterMixin, unittest.TestCase): ...@@ -205,25 +285,61 @@ class RobertaModelTest(ModelTesterMixin, unittest.TestCase):
def test_config(self): def test_config(self):
self.config_tester.run_common_tests() self.config_tester.run_common_tests()
def test_roberta_model(self): def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_model(*config_and_inputs) self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_for_masked_lm(self): def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_masked_lm(*config_and_inputs) self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self): def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_token_classification(*config_and_inputs) self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self): def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_multiple_choice(*config_and_inputs) self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self): def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_question_answering(*config_and_inputs) self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow @slow
def test_model_from_pretrained(self): def test_model_from_pretrained(self):
......