Commit 71f94a8a authored by Aymeric Augustin

Remove unused variables in src.

parent 81422c4e
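
All of the hunks below remove the same kind of dead code: a name that is bound but never read again, whether an unused local assignment, an unused `as e` binding on an `except` clause, or a variable holding a return value that is only called for its side effect. A minimal, self-contained sketch of the pattern, with invented function and variable names (linters such as flake8/pyflakes report the unused binding as F841):

    # Hypothetical before/after illustration; names are invented for this example.
    def normalize(values):
        total = sum(values)
        average = total / len(values)  # assigned but never read below -> flake8 F841
        return [v / total for v in values]

    # After cleanup: the dead assignment is simply dropped, behaviour is unchanged.
    def normalize_cleaned(values):
        total = sum(values)
        return [v / total for v in values]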
@@ -19,7 +19,7 @@ try:
     from sklearn.metrics import matthews_corrcoef, f1_score
     _has_sklearn = True
-except (AttributeError, ImportError) as e:
+except (AttributeError, ImportError):
     _has_sklearn = False
...
@@ -241,8 +241,6 @@ class AlbertAttention(BertSelfAttention):
         context_layer = torch.matmul(attention_probs, value_layer)
         context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
-        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
-        reshaped_context_layer = context_layer.view(*new_context_layer_shape)
         # Should find a better way to do this
         w = (
@@ -334,9 +332,6 @@ class AlbertTransformer(nn.Module):
             # Index of the hidden group
             group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
-            # Index of the layer inside the group
-            layer_idx = int(i - group_idx * layers_per_group)
             layer_group_output = self.albert_layer_groups[group_idx](
                 hidden_states,
                 attention_mask,
...
@@ -629,7 +629,6 @@ class T5Stack(T5PreTrainedModel):
                 all_attentions = all_attentions + (layer_outputs[1],)  # We keep only self-attention weights for now
         hidden_states = self.final_layer_norm(hidden_states)
-        layer_output = self.dropout(hidden_states)
         # Add last layer
         if self.output_hidden_states:
...
@@ -122,7 +122,7 @@ def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, a
         tf_inputs = tf_model.dummy_inputs
     if tf_inputs is not None:
-        tfo = tf_model(tf_inputs, training=False)  # Make sure model is built
+        tf_model(tf_inputs, training=False)  # Make sure model is built
     # Adapt state dict - TODO remove this and update the AWS weights files instead
     # Convert old format to new format if needed from a PyTorch state_dict
@@ -187,7 +187,7 @@ def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, a
     K.batch_set_value(weight_value_tuples)
     if tf_inputs is not None:
-        tfo = tf_model(tf_inputs, training=False)  # Make sure restore ops are run
+        tf_model(tf_inputs, training=False)  # Make sure restore ops are run
     logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))
@@ -218,7 +218,6 @@ def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs
         import transformers
-    tf_path = os.path.abspath(tf_checkpoint_path)
     logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
     # Instantiate and load the associated TF 2.0 model
@@ -230,7 +229,7 @@ def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs
         tf_inputs = tf_model.dummy_inputs
     if tf_inputs is not None:
-        tfo = tf_model(tf_inputs, training=False)  # Make sure model is built
+        tf_model(tf_inputs, training=False)  # Make sure model is built
     tf_model.load_weights(tf_checkpoint_path, by_name=True)
...
@@ -491,7 +491,6 @@ class TFT5MainLayer(tf.keras.layers.Layer):
                 all_attentions = all_attentions + (layer_outputs[1],)
         hidden_states = self.final_layer_norm(hidden_states)
-        layer_output = self.dropout(hidden_states, training=training)
         # Add last layer
         if self.output_hidden_states:
...
@@ -118,7 +118,6 @@ class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
         hidden, target = inputs
         head_logprob = 0
         if self.n_clusters == 0:
-            softmax_b = tf.get_variable("bias", [self.config.vocab_size], initializer=tf.zeros_initializer())
             output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
             if target is not None:
                 loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
...
@@ -320,7 +320,7 @@ class TFPreTrainedModel(tf.keras.Model):
                 # Load from a PyTorch checkpoint
                 return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)
-        ret = model(model.dummy_inputs, training=False)  # build the network with dummy inputs
+        model(model.dummy_inputs, training=False)  # build the network with dummy inputs
         assert os.path.isfile(resolved_archive_file), "Error retrieving file {}".format(resolved_archive_file)
         # 'by_name' allow us to do transfer learning by skipping/adding layers
@@ -333,7 +333,7 @@ class TFPreTrainedModel(tf.keras.Model):
                 "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
             )
-        ret = model(model.dummy_inputs, training=False)  # Make sure restore ops are run
+        model(model.dummy_inputs, training=False)  # Make sure restore ops are run
         # Check if the models are the same to output loading informations
         with h5py.File(resolved_archive_file, "r") as f:
@@ -515,7 +515,7 @@ class TFSequenceSummary(tf.keras.layers.Layer):
             cls_index = inputs[1] if len(inputs) > 1 else None
             assert len(inputs) <= 2, "Too many inputs."
         else:
-            input_ids = inputs.get("input_ids")
+            hidden_states = inputs.get("hidden_states")
             cls_index = inputs.get("cls_index", None)
         if self.summary_type == "last":
...