Unverified commit dad5ca83, authored by Joao Gante, committed by GitHub

TF: Finalize `unpack_inputs`-related changes (#16499)

* Add `unpack_inputs` to remaining models

* Remove `**kwargs` from `call()` in TF models

* Fix TF T5 tests
parent be9474bd
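The diffs below finish rolling out the `@unpack_inputs` decorator: input handling that previously leaned on a `**kwargs` catch-all in each `call()` is now done by the decorator, so the catch-all is dropped everywhere. As a minimal, hypothetical sketch (plain Python toy functions, not the actual transformers model code) of why removing the catch-all matters:

```python
# Hypothetical toy functions, only to illustrate the effect of removing the
# `**kwargs` catch-all from a call signature; not the transformers model code.

def call_with_catchall(input_ids=None, attention_mask=None, training=False, **kwargs):
    # Old style: unexpected keyword arguments land in `kwargs` and are silently ignored.
    return {"input_ids": input_ids, "attention_mask": attention_mask}

def call_without_catchall(input_ids=None, attention_mask=None, training=False):
    # New style: unexpected keyword arguments raise a TypeError at the call site.
    return {"input_ids": input_ids, "attention_mask": attention_mask}

call_with_catchall(input_ids=[1, 2], atention_mask=[1, 1])  # typo slips through silently
try:
    call_without_catchall(input_ids=[1, 2], atention_mask=[1, 1])  # same typo, now caught
except TypeError as exc:
    print(exc)  # unexpected keyword argument 'atention_mask'
```

With the decorator normalizing the supported inputs, typos and unsupported arguments surface immediately instead of being swallowed.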
@@ -360,7 +360,6 @@ class TFXLMMainLayer(tf.keras.layers.Layer):
        output_hidden_states=None,
        return_dict=None,
        training=False,
-       **kwargs,
    ):
        # removed: src_enc=None, src_len=None
@@ -707,7 +706,6 @@ class TFXLMModel(TFXLMPreTrainedModel):
        output_hidden_states=None,
        return_dict=None,
        training=False,
-       **kwargs,
    ):
        outputs = self.transformer(
            input_ids=input_ids,
@@ -843,7 +841,6 @@ class TFXLMWithLMHeadModel(TFXLMPreTrainedModel):
        output_hidden_states=None,
        return_dict=None,
        training=False,
-       **kwargs,
    ):
        transformer_outputs = self.transformer(
            input_ids=input_ids,
@@ -917,7 +914,6 @@ class TFXLMForSequenceClassification(TFXLMPreTrainedModel, TFSequenceClassificat
        return_dict=None,
        labels=None,
        training=False,
-       **kwargs,
    ):
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
@@ -1025,7 +1021,6 @@ class TFXLMForMultipleChoice(TFXLMPreTrainedModel, TFMultipleChoiceLoss):
        return_dict=None,
        labels=None,
        training=False,
-       **kwargs,
    ):
        if input_ids is not None:
            num_choices = shape_list(input_ids)[1]
@@ -1150,7 +1145,6 @@ class TFXLMForTokenClassification(TFXLMPreTrainedModel, TFTokenClassificationLos
        return_dict=None,
        labels=None,
        training=False,
-       **kwargs,
    ):
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
@@ -1237,7 +1231,6 @@ class TFXLMForQuestionAnsweringSimple(TFXLMPreTrainedModel, TFQuestionAnsweringL
        start_positions=None,
        end_positions=None,
        training=False,
-       **kwargs,
    ):
        r"""
        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
@@ -597,7 +597,6 @@ class TFXLNetMainLayer(tf.keras.layers.Layer):
        output_hidden_states=None,
        return_dict=None,
        training=False,
-       **kwargs,
    ):
        if training and use_mems is None:
@@ -1152,7 +1151,6 @@ class TFXLNetModel(TFXLNetPreTrainedModel):
        output_hidden_states=None,
        return_dict=None,
        training=False,
-       **kwargs,
    ):
        outputs = self.transformer(
            input_ids=input_ids,
@@ -1262,7 +1260,6 @@ class TFXLNetLMHeadModel(TFXLNetPreTrainedModel, TFCausalLanguageModelingLoss):
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFXLNetLMHeadModelOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
@@ -1394,7 +1391,6 @@ class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel, TFSequenceClassif
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFXLNetForSequenceClassificationOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
@@ -1501,7 +1497,6 @@ class TFXLNetForMultipleChoice(TFXLNetPreTrainedModel, TFMultipleChoiceLoss):
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFXLNetForMultipleChoiceOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
@@ -1623,7 +1618,6 @@ class TFXLNetForTokenClassification(TFXLNetPreTrainedModel, TFTokenClassificatio
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFXLNetForTokenClassificationOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
@@ -1711,7 +1705,6 @@ class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel, TFQuestionAnswer
        start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
        end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFXLNetForQuestionAnsweringSimpleOutput, Tuple[tf.Tensor]]:
        r"""
        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
@@ -653,7 +653,6 @@ class TF{{cookiecutter.camelcase_modelname}}MainLayer(tf.keras.layers.Layer):
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
-       **kwargs,
    ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
        if not self.config.is_decoder:
@@ -949,7 +948,6 @@ class TF{{cookiecutter.camelcase_modelname}}Model(TF{{cookiecutter.camelcase_mod
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
        r"""
        encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
@@ -1049,7 +1047,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForMaskedLM(TF{{cookiecutter.camelca
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
@@ -1146,7 +1143,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForCausalLM(TF{{cookiecutter.camelca
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
        r"""
        encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
@@ -1289,7 +1285,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification(TF{{cookie
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
@@ -1379,7 +1374,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice(TF{{cookiecutter.c
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
@@ -1506,7 +1500,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForTokenClassification(TF{{cookiecut
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
@@ -1588,7 +1581,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(TF{{cookiecutte
        start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
        end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
-       **kwargs,
    ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
        r"""
        start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
@@ -2262,7 +2254,6 @@ class TF{{cookiecutter.camelcase_modelname}}Encoder(tf.keras.layers.Layer):
        output_hidden_states=None,
        return_dict=None,
        training=False,
-       **kwargs,
    ):
        """
        Args:
@@ -2421,7 +2412,6 @@ class TF{{cookiecutter.camelcase_modelname}}Decoder(tf.keras.layers.Layer):
        output_hidden_states=None,
        return_dict=None,
        training=False,
-       **kwargs,
    ):
        r"""
        Args:
@@ -2876,7 +2866,6 @@ class TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration(TF{{cookiec
        return_dict=None,
        labels=None,
        training=False,
-       **kwargs,
    ):
        """
        Returns:
@@ -355,7 +355,6 @@ class TFConvBertModelTest(TFModelTesterMixin, unittest.TestCase):
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
-           inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
@@ -346,6 +346,11 @@ class TFT5ModelTest(TFModelTesterMixin, unittest.TestCase):
        self.assertEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer))
        self.assertNotEqual(model.get_input_embeddings().weight.shape[0], original_vocab_size)

+   # This test is run in `TFT5EncoderOnlyModelTest`, where the main layer has the same inputs as the model
+   @unittest.skip(reason="The inputs of the Main Layer are different.")
+   def test_keras_save_load(self):
+       pass


class TFT5EncoderOnlyModelTester:
    def __init__(
@@ -573,7 +573,12 @@ class TFModelTesterMixin:
            pt_model = pt_model_class(config)

            tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
-           tf_inputs_dict_maybe_with_labels = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
+           tf_inputs_dict_maybe_with_labels = self._prepare_for_class(
+               inputs_dict,
+               model_class,
+               # Not all models accept "labels" in the forward pass (yet :) )
+               return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False,
+           )

            # Check we can load pt model in tf and vice-versa with model => model functions
            tf_model = transformers.load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=tf_inputs_dict)
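A note on the check introduced above: `inspect.signature` exposes a callable's parameters, which is how the test decides at runtime whether a given model's `call()` accepts `labels`. A minimal standalone sketch, using a hypothetical toy `call` function rather than the actual test or model code:

```python
import inspect

# Toy stand-in for a model's call() signature; not the transformers code.
def call(self, input_ids=None, attention_mask=None, labels=None, training=False):
    pass

# Same idea as the test above: does `call` accept a `labels` keyword argument?
accepts_labels = "labels" in inspect.signature(call).parameters
print(accepts_labels)  # True -> the test prepares inputs with return_labels=True
```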
@@ -722,7 +727,6 @@ class TFModelTesterMixin:
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
-           inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
@@ -944,10 +948,6 @@ class TFModelTesterMixin:
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

-           tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
-           dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
-           check_equivalence(model, tuple_inputs, dict_inputs)
-
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@@ -956,6 +956,12 @@ class TFModelTesterMixin:
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

+           # Not all models accept "labels" in the forward pass (yet :) )
+           if "labels" in inspect.signature(model.call).parameters.keys():
+               tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
+               dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
+               check_equivalence(model, tuple_inputs, dict_inputs)
+
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})