Commit 28e608a2 authored by Aymeric Augustin

Remove trailing whitespace from all Python files.

Fixes flake8 warning W291 (x224).
parent 1efa0a75
@@ -14,7 +14,7 @@ dependencies = ["torch", "tqdm", "boto3", "requests", "regex", "sentencepiece",
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    r"""
        # Using torch.hub !
        import torch
@@ -34,7 +34,7 @@ def config(*args, **kwargs):
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    r"""
        # Using torch.hub !
        import torch
...
@@ -216,7 +216,7 @@ XXX_START_DOCSTRING = r""" The XXX model was proposed in
        `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
    Parameters:
        config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@@ -230,13 +230,13 @@ XXX_INPUTS_DOCSTRING = r"""
        (a) For sequence pairs:
            ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
            ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
        (b) For single sequences:
            ``tokens: [CLS] the dog is hairy . [SEP]``
            ``token_type_ids: 0 0 0 0 0 0 0``
    Xxx is a model with absolute position embeddings so it's usually advised to pad the inputs on
...
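For reference, the segment layout in the template docstring above follows the standard BERT convention: everything up to and including the first ``[SEP]`` belongs to segment 0, the rest to segment 1. A minimal, library-free sketch using the token sequence from the example::

    tokens = ["[CLS]", "is", "this", "jack", "##son", "##ville", "?", "[SEP]",
              "no", "it", "is", "not", ".", "[SEP]"]
    first_sep = tokens.index("[SEP]")
    # segment 0 up to and including the first [SEP], segment 1 afterwards
    token_type_ids = [0 if i <= first_sep else 1 for i in range(len(tokens))]
    assert token_type_ids == [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]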
@@ -198,7 +198,7 @@ XXX_START_DOCSTRING = r""" The XXX model was proposed in
        https://pytorch.org/docs/stable/nn.html#module
    Parameters:
        config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@@ -212,13 +212,13 @@ XXX_INPUTS_DOCSTRING = r"""
        (a) For sequence pairs:
            ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
            ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
        (b) For single sequences:
            ``tokens: [CLS] the dog is hairy . [SEP]``
            ``token_type_ids: 0 0 0 0 0 0 0``
    Xxx is a model with absolute position embeddings so it's usually advised to pad the inputs on
@@ -670,9 +670,9 @@ class XxxForQuestionAnswering(XxxPreTrainedModel):
        question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
        input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]"
        input_ids = tokenizer.encode(input_text)
        token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
        start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
        all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
        print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))
        # a nice puppet
...
@@ -49,11 +49,11 @@ class LoginCommand(BaseUserCommand):
    def run(self):
        print(
            """
            [ASCII-art "Hugging Face" banner; column alignment not preserved in this view]
            """
        )
...
@@ -281,7 +281,7 @@ def squad_convert_examples_to_features(
        processor = SquadV2Processor()
        examples = processor.get_dev_examples(data_dir)
        features = squad_convert_examples_to_features(
            examples=examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
@@ -640,8 +640,8 @@ class SquadFeatures(object):
            has more information related to that token and should be prioritized over this feature for that token.
        tokens: list of tokens corresponding to the input ids
        token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
        start_position: start of the answer token index
        end_position: end of the answer token index
    """
    def __init__(
...
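The ``squad_convert_examples_to_features`` call shown above is cut off by the diff context. For orientation, a hedged sketch of a typical full invocation; the remaining keyword names (``doc_stride``, ``max_query_length``, ``is_training``, ``return_dataset``) are assumed from the v2-era signature and may differ in other releases::

    from transformers import BertTokenizer
    from transformers.data.processors.squad import SquadV2Processor
    from transformers import squad_convert_examples_to_features

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = SquadV2Processor()
    examples = processor.get_dev_examples(data_dir)   # data_dir holds the SQuAD v2 dev file

    features = squad_convert_examples_to_features(
        examples=examples,
        tokenizer=tokenizer,
        max_seq_length=384,
        doc_stride=128,          # assumed keyword: overlap between successive document chunks
        max_query_length=64,     # assumed keyword: questions are truncated to this length
        is_training=False,       # assumed keyword: no answer positions needed at eval time
        return_dataset="pt",     # assumed keyword: also build a torch TensorDataset
    )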
@@ -396,7 +396,7 @@ ALBERT_START_DOCSTRING = r""" The ALBERT model was proposed in
        https://pytorch.org/docs/stable/nn.html#module
    Parameters:
        config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@@ -410,13 +410,13 @@ ALBERT_INPUTS_DOCSTRING = r"""
        (a) For sequence pairs:
            ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
            ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
        (b) For single sequences:
            ``tokens: [CLS] the dog is hairy . [SEP]``
            ``token_type_ids: 0 0 0 0 0 0 0``
    Albert is a model with absolute position embeddings so it's usually advised to pad the inputs on
@@ -796,9 +796,9 @@ class AlbertForQuestionAnswering(AlbertPreTrainedModel):
        question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
        input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]"
        input_ids = tokenizer.encode(input_text)
        token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
        start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
        all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
        print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))
        # a nice puppet
...
@@ -864,7 +864,7 @@ class AutoModelForTokenClassification:
    def from_config(cls, config):
        r""" Instantiates one of the base model classes of the library
        from a configuration.
            config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                The model class to instantiate is selected based on the configuration class:
                - isInstance of `distilbert` configuration class: DistilBertModel (DistilBERT model)
@@ -874,7 +874,7 @@ class AutoModelForTokenClassification:
                - isInstance of `roberta` configuration class: RobertaModel (Roberta model)
        Examples::
            config = BertConfig.from_pretrained('bert-base-uncased')  # Download configuration from S3 and cache.
            model = AutoModelForTokenClassification.from_config(config)  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
        """
...
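As the docstring example illustrates, ``from_config`` builds the architecture only, while ``from_pretrained`` also loads weights. A short sketch of the contrast, assuming the auto classes shown in this diff::

    from transformers import AutoModelForTokenClassification, BertConfig

    # From a config: only the configuration is fetched, all weights are randomly initialised.
    config = BertConfig.from_pretrained('bert-base-uncased')
    model = AutoModelForTokenClassification.from_config(config)

    # From a pretrained checkpoint: the encoder weights are loaded
    # (the token-classification head on top is still randomly initialised).
    model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased')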
@@ -40,9 +40,9 @@ CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
CAMEMBERT_START_DOCSTRING = r""" The CamemBERT model was proposed in
    `CamemBERT: a Tasty French Language Model`_
    by Louis Martin, Benjamin Muller, Pedro Javier Ortiz Suárez, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah, and Benoît Sagot. It is based on Facebook's RoBERTa model released in 2019.
    It is a model trained on 138GB of French text.
    This implementation is the same as RoBERTa.
    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
@@ -55,7 +55,7 @@ CAMEMBERT_START_DOCSTRING = r""" The CamemBERT model was proposed in
        https://pytorch.org/docs/stable/nn.html#module
    Parameters:
        config (:class:`~transformers.CamembertConfig`): Model configuration class with all the parameters of the
            model. Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@@ -74,7 +74,7 @@ CAMEMBERT_INPUTS_DOCSTRING = r"""
            ``tokens: <s> the dog is hairy . </s>``
        Fully encoded sequences or sequence pairs can be obtained using the CamembertTokenizer.encode function with
        the ``add_special_tokens`` parameter set to ``True``.
    CamemBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on
@@ -199,7 +199,7 @@ class CamembertForMaskedLM(RobertaForMaskedLM):
@add_start_docstrings(
    """CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer
    on top of the pooled output) e.g. for GLUE tasks. """,
    CAMEMBERT_START_DOCSTRING,
    CAMEMBERT_INPUTS_DOCSTRING,
...
@@ -192,7 +192,7 @@ class CTRLPreTrainedModel(PreTrainedModel):
            module.weight.data.fill_(1.0)
CTRL_START_DOCSTRING = r""" CTRL model was proposed in
    `CTRL: A Conditional Transformer Language Model for Controllable Generation`_
    by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
    It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
@@ -224,7 +224,7 @@ CTRL_INPUTS_DOCSTRING = r""" Inputs:
        **past**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
@@ -261,7 +261,7 @@ class CTRLModel(CTRLPreTrainedModel):
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
@@ -464,7 +464,7 @@ class CTRLLMHeadModel(CTRLPreTrainedModel):
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
...
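The ``past`` mechanism described here (and again for GPT-2 below) lets the caller feed only the newly generated token at each step instead of re-encoding the whole prefix. A minimal sketch, assuming the v2-era API in which the LM-head forward accepts a ``past`` keyword and returns ``(logits, past, ...)``::

    import torch
    from transformers import GPT2LMHeadModel, GPT2Tokenizer

    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')

    input_ids = torch.tensor([tokenizer.encode("My dog is")])
    logits, past = model(input_ids)[:2]                # full prompt; cached keys/values come back as `past`

    next_token = torch.argmax(logits[:, -1, :], dim=-1, keepdim=True)
    # only the new token is passed in; its past is already cached
    # (note: later library versions rename this argument to `past_key_values`)
    logits, past = model(next_token, past=past)[:2]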
@@ -366,12 +366,12 @@ DISTILBERT_START_DOCSTRING = r"""
    For more information on DistilBERT, please refer to our
    `detailed blog post`_
    .. _`detailed blog post`:
        https://medium.com/huggingface/distilbert-8cf3380435b5
    Parameters:
        config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@@ -381,7 +381,7 @@ DISTILBERT_INPUTS_DOCSTRING = r"""
        **input_ids** ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            The input sequences should start with `[CLS]` and end with `[SEP]` tokens.
            For now, ONLY BertTokenizer(`bert-base-uncased`) is supported and you should use this tokenizer when using DistilBERT.
        **attention_mask**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
...
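Per the note above that DistilBERT expects BERT-style inputs, a small sketch of encoding with ``BertTokenizer`` and running ``DistilBertModel``; the checkpoint name ``distilbert-base-uncased`` is assumed::

    import torch
    from transformers import BertTokenizer, DistilBertModel

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = DistilBertModel.from_pretrained('distilbert-base-uncased')

    # add_special_tokens=True inserts the required [CLS] ... [SEP] markers
    input_ids = torch.tensor([tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)])
    last_hidden_state = model(input_ids)[0]   # (batch_size, sequence_length, hidden_size)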
@@ -304,7 +304,7 @@ GPT2_INPUTS_DOCSTRING = r""" Inputs:
        **past**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
@@ -341,7 +341,7 @@ class GPT2Model(GPT2PreTrainedModel):
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
@@ -532,7 +532,7 @@ class GPT2LMHeadModel(GPT2PreTrainedModel):
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
@@ -640,7 +640,7 @@ class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
@@ -654,15 +654,15 @@ class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
        import torch
        from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
        # Add a [CLS] to the vocabulary (we should train it also!)
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        encoded_choices = [tokenizer.encode(s) for s in choices]
        cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
...
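The docstring example above is truncated by the diff context after ``cls_token_location``. A hedged sketch of how those variables are then consumed, continuing the snippet above; the ``mc_token_ids`` keyword is assumed from the v2-era ``GPT2DoubleHeadsModel`` signature::

    # Both choices tokenize to the same length here, so they can be stacked directly.
    input_ids = torch.tensor(encoded_choices).unsqueeze(0)   # (1, number_of_choices, sequence_length)
    mc_token_ids = torch.tensor([cls_token_location])        # (1, number_of_choices): position of [CLS] in each choice
    outputs = model(input_ids, mc_token_ids=mc_token_ids)    # assumed keyword
    lm_prediction_scores, mc_prediction_scores = outputs[:2]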
@@ -75,10 +75,10 @@ class ModalEmbeddings(nn.Module):
        return embeddings
MMBT_START_DOCSTRING = r""" MMBT model was proposed in
    `Supervised Multimodal Bitransformers for Classifying Images and Text`_
    by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
    It's a supervised multimodal bitransformer model that fuses information from text and other image encoders,
    and obtains state-of-the-art performance on various multimodal classification benchmark tasks.
    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
@@ -93,15 +93,15 @@ MMBT_START_DOCSTRING = r""" MMBT model was proposed in
    Parameters:
        config (:class:`~transformers.MMBTConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
        transformer (:class: `~nn.Module`): A text transformer that is used by MMBT.
            It should have embeddings, encoder, and pooler attributes.
        encoder (:class: `~nn.Module`): Encoder for the second modality.
            It should take in a batch of modal inputs and return k, n dimension embeddings.
"""
MMBT_INPUTS_DOCSTRING = r""" Inputs:
        **input_modal**: ``torch.FloatTensor`` of shape ``(batch_size, ***)``:
            The other modality data. It will be the shape that the encoder for that type expects.
            e.g. With an Image Encoder, the shape would be (batch_size, channels, height, width)
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
@@ -119,7 +119,7 @@ MMBT_INPUTS_DOCSTRING = r""" Inputs:
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Segment token indices to indicate different portions of the inputs.
        **modal_token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, modal_sequence_length)``:
            Segment token indices to indicate different portions of the non-text modality.
            The embeddings from these tokens will be summed with the respective token embeddings for the non-text modality.
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of positions of each input sequence token in the position embeddings.
...
@@ -97,11 +97,11 @@ ROBERTA_START_DOCSTRING = r""" The RoBERTa model was proposed in
    `RoBERTa: A Robustly Optimized BERT Pretraining Approach`_
    by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,
    Veselin Stoyanov. It is based on Google's BERT model released in 2018.
    It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining
    objective and training with much larger mini-batches and learning rates.
    This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained
    models.
    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
@@ -114,7 +114,7 @@ ROBERTA_START_DOCSTRING = r""" The RoBERTa model was proposed in
        https://pytorch.org/docs/stable/nn.html#module
    Parameters:
        config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
            model. Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@@ -133,7 +133,7 @@ ROBERTA_INPUTS_DOCSTRING = r"""
            ``tokens: <s> the dog is hairy . </s>``
        Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with
        the ``add_special_tokens`` parameter set to ``True``.
    RoBERTa is a model with absolute position embeddings so it's usually advised to pad the inputs on
@@ -319,7 +319,7 @@ class RobertaLMHead(nn.Module):
@add_start_docstrings(
    """RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
    on top of the pooled output) e.g. for GLUE tasks. """,
    ROBERTA_START_DOCSTRING,
    ROBERTA_INPUTS_DOCSTRING,
...
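As the docstring notes, RoBERTa-formatted inputs can be produced directly by the tokenizer. A short sketch; the special-token layout shown in the comments is the one used by the v2-era ``RobertaTokenizer``::

    from transformers import RobertaTokenizer

    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

    # Single sequence: <s> ... </s>
    ids = tokenizer.encode("the dog is hairy", add_special_tokens=True)

    # Sequence pair: <s> ... </s></s> ... </s>
    pair_ids = tokenizer.encode("is this jacksonville ?", "no it is not .", add_special_tokens=True)
    print(tokenizer.convert_ids_to_tokens(pair_ids))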
@@ -661,7 +661,7 @@ T5_START_DOCSTRING = r""" The T5 model was proposed in
        https://pytorch.org/docs/stable/nn.html#module
    Parameters:
        config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
...
@@ -510,7 +510,7 @@ ALBERT_START_DOCSTRING = r""" The ALBERT model was proposed in
        `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
    Parameters:
        config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@@ -524,13 +524,13 @@ ALBERT_INPUTS_DOCSTRING = r"""
        (a) For sequence pairs:
            ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
            ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
        (b) For single sequences:
            ``tokens: [CLS] the dog is hairy . [SEP]``
            ``token_type_ids: 0 0 0 0 0 0 0``
    Albert is a model with absolute position embeddings so it's usually advised to pad the inputs on
...
@@ -356,7 +356,7 @@ class TFCTRLPreTrainedModel(TFPreTrainedModel):
    base_model_prefix = "transformer"
CTRL_START_DOCSTRING = r""" CTRL model was proposed in
    `CTRL: A Conditional Transformer Language Model for Controllable Generation`_
    by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
    It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
...
@@ -109,7 +109,7 @@ class TFEmbeddings(tf.keras.layers.Layer):
                linear tensor, float32 with shape [batch_size, length, vocab_size].
        Raises:
            ValueError: if mode is not valid.
        Shared weights logic adapted from
            https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
@@ -487,7 +487,7 @@ DISTILBERT_START_DOCSTRING = r"""
    For more information on DistilBERT, please refer to our
    `detailed blog post`_
    This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and
    refer to the TF 2.0 documentation for all matter related to general usage and behavior.
@@ -514,7 +514,7 @@ DISTILBERT_START_DOCSTRING = r"""
        `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
    Parameters:
        config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@@ -524,7 +524,7 @@ DISTILBERT_INPUTS_DOCSTRING = r"""
        **input_ids** ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            The input sequences should start with `[CLS]` and end with `[SEP]` tokens.
            For now, ONLY BertTokenizer(`bert-base-uncased`) is supported and you should use this tokenizer when using DistilBERT.
        **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
...
@@ -584,14 +584,14 @@ class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel):
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = TFGPT2DoubleHeadsModel.from_pretrained('gpt2')
        # Add a [CLS] to the vocabulary (we should train it also!)
        # This option is currently not implemented in TF 2.0
        raise NotImplementedError
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        encoded_choices = [tokenizer.encode(s) for s in choices]
        cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
...
@@ -553,7 +553,7 @@ class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
        tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
        model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
        # Add a [CLS] to the vocabulary (we should train it also!)
        # This option is currently not implemented in TF 2.0
        raise NotImplementedError
...