chenpangpang / transformers

Commit 71f94a8a
Authored Dec 23, 2019 by Aymeric Augustin
Parent: 81422c4e

    Remove unused variables in src.

Showing 7 changed files with 7 additions and 16 deletions (+7 -16).
src/transformers/data/metrics/__init__.py               +1 -1
src/transformers/modeling_albert.py                      +0 -5
src/transformers/modeling_t5.py                          +0 -1
src/transformers/modeling_tf_pytorch_utils.py            +3 -4
src/transformers/modeling_tf_t5.py                       +0 -1
src/transformers/modeling_tf_transfo_xl_utilities.py     +0 -1
src/transformers/modeling_tf_utils.py                    +3 -3
src/transformers/data/metrics/__init__.py

@@ -19,7 +19,7 @@ try:
     from sklearn.metrics import matthews_corrcoef, f1_score

     _has_sklearn = True
-except (AttributeError, ImportError) as e:
+except (AttributeError, ImportError):
     _has_sklearn = False
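The only functional change here drops the unused `as e` binding. A minimal sketch of the surrounding optional-dependency pattern, with a hypothetical guarded caller added for illustration:

    # Minimal sketch of the optional-dependency guard this hunk touches: the import
    # is attempted once, cached in a module-level flag, and callers test the flag
    # instead of re-importing. Dropping "as e" removes a binding that was never read
    # (the pattern flake8 reports as F841). The caller below is hypothetical.
    try:
        from sklearn.metrics import matthews_corrcoef, f1_score

        _has_sklearn = True
    except (AttributeError, ImportError):
        _has_sklearn = False


    def is_sklearn_available():
        return _has_sklearn


    def f1_or_raise(preds, labels):
        # hypothetical guarded caller, not the library's own metric function
        if not is_sklearn_available():
            raise RuntimeError("scikit-learn is required to compute F1")
        return f1_score(y_true=labels, y_pred=preds)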
src/transformers/modeling_albert.py

@@ -241,8 +241,6 @@ class AlbertAttention(BertSelfAttention):
         context_layer = torch.matmul(attention_probs, value_layer)

         context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
-        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
-        reshaped_context_layer = context_layer.view(*new_context_layer_shape)

         # Should find a better way to do this
         w = (

@@ -334,9 +332,6 @@ class AlbertTransformer(nn.Module):
             # Index of the hidden group
             group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
-            # Index of the layer inside the group
-            layer_idx = int(i - group_idx * layers_per_group)

             layer_group_output = self.albert_layer_groups[group_idx](
                 hidden_states,
                 attention_mask,
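Both deletions are dead stores: `reshaped_context_layer` is never read in `AlbertAttention.forward`, and `layer_idx` is never read in `AlbertTransformer.forward` because the group module at `albert_layer_groups[group_idx]` applies its own inner layers itself. An illustrative sketch of the group-index arithmetic, with assumed config values (not library code):

    # How ALBERT maps a layer index to a parameter-sharing group. With the default
    # configuration (12 hidden layers, 1 group) every layer resolves to group 0,
    # so the removed `layer_idx` was never needed downstream.
    num_hidden_layers = 12    # assumed example values
    num_hidden_groups = 1

    layers_per_group = int(num_hidden_layers / num_hidden_groups)   # 12
    for i in range(num_hidden_layers):
        group_idx = int(i / (num_hidden_layers / num_hidden_groups))  # always 0 here
        # the deleted line computed: layer_idx = int(i - group_idx * layers_per_group)
        # but nothing read it afterwards.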
src/transformers/modeling_t5.py

@@ -629,7 +629,6 @@ class T5Stack(T5PreTrainedModel):
                 all_attentions = all_attentions + (layer_outputs[1],)  # We keep only self-attention weights for now

         hidden_states = self.final_layer_norm(hidden_states)
-        layer_output = self.dropout(hidden_states)

         # Add last layer
         if self.output_hidden_states:
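`layer_output` was assigned but never read, and `nn.Dropout` does not modify its input in place, so deleting the line leaves the stack's output unchanged; it only removes a wasted forward call. The TensorFlow counterpart in src/transformers/modeling_tf_t5.py below gets the same one-line cleanup. A quick self-contained check of the non-in-place behaviour (toy values assumed, not library code):

    # nn.Dropout returns a new tensor; a result that is never read has no effect
    # on later computation, which is why the assignment can simply be dropped.
    import torch
    from torch import nn

    dropout = nn.Dropout(p=0.5)
    x = torch.ones(4)
    _ = dropout(x)    # result discarded, like the removed `layer_output = ...`
    print(x)          # still all ones: the input tensor is untouched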
src/transformers/modeling_tf_pytorch_utils.py

@@ -122,7 +122,7 @@ def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, a
         tf_inputs = tf_model.dummy_inputs

     if tf_inputs is not None:
-        tfo = tf_model(tf_inputs, training=False)  # Make sure model is built
+        tf_model(tf_inputs, training=False)  # Make sure model is built

     # Adapt state dict - TODO remove this and update the AWS weights files instead
     # Convert old format to new format if needed from a PyTorch state_dict

@@ -187,7 +187,7 @@ def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, a
     K.batch_set_value(weight_value_tuples)

     if tf_inputs is not None:
-        tfo = tf_model(tf_inputs, training=False)  # Make sure restore ops are run
+        tf_model(tf_inputs, training=False)  # Make sure restore ops are run

     logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))

@@ -218,7 +218,6 @@ def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs
     import transformers

-    tf_path = os.path.abspath(tf_checkpoint_path)
     logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))

     # Instantiate and load the associated TF 2.0 model

@@ -230,7 +229,7 @@ def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs
         tf_inputs = tf_model.dummy_inputs

     if tf_inputs is not None:
-        tfo = tf_model(tf_inputs, training=False)  # Make sure model is built
+        tf_model(tf_inputs, training=False)  # Make sure model is built

     tf_model.load_weights(tf_checkpoint_path, by_name=True)
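The three `tfo = tf_model(...)` sites keep the call and drop the unused binding: the call exists only for its side effect of building the Keras model (and running restore ops), not for its outputs. The third hunk drops `tf_path`, an assignment whose result is never used later in the function (`load_weights` is passed `tf_checkpoint_path` directly). A minimal sketch of the build-by-calling pattern, with an assumed toy model:

    # A Keras model only creates its weight variables once it has been called, so
    # these loaders invoke it on dummy inputs purely for that side effect; the
    # returned outputs can be discarded.
    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(4)])
    dummy_inputs = tf.zeros((1, 8))

    model(dummy_inputs, training=False)   # builds the variables; output discarded
    print(len(model.weights))             # now non-empty, so weights can be assigned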
src/transformers/modeling_tf_t5.py

@@ -491,7 +491,6 @@ class TFT5MainLayer(tf.keras.layers.Layer):
                 all_attentions = all_attentions + (layer_outputs[1],)

         hidden_states = self.final_layer_norm(hidden_states)
-        layer_output = self.dropout(hidden_states, training=training)

         # Add last layer
         if self.output_hidden_states:
src/transformers/modeling_tf_transfo_xl_utilities.py

@@ -118,7 +118,6 @@ class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
         hidden, target = inputs
         head_logprob = 0
         if self.n_clusters == 0:
-            softmax_b = tf.get_variable("bias", [self.config.vocab_size], initializer=tf.zeros_initializer())
             output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
             if target is not None:
                 loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
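In the `n_clusters == 0` branch the logits come from `self._logit(...)`, so the `softmax_b` fetched with the TF1-style `tf.get_variable` was never used. An illustrative sketch of what that branch boils down to, with assumed shapes and names (not the library code):

    # The degenerate adaptive-softmax case with no clusters: a single output
    # projection followed by sparse softmax cross-entropy, roughly what _logit
    # plus the loss line compute.
    import tensorflow as tf

    vocab_size, hidden_dim = 100, 16
    weight = tf.random.normal((vocab_size, hidden_dim))
    bias = tf.zeros((vocab_size,))

    hidden = tf.random.normal((8, hidden_dim))    # (batch, hidden)
    target = tf.random.uniform((8,), maxval=vocab_size, dtype=tf.int32)

    logits = tf.matmul(hidden, weight, transpose_b=True) + bias
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=logits)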
src/transformers/modeling_tf_utils.py

@@ -320,7 +320,7 @@ class TFPreTrainedModel(tf.keras.Model):
                 # Load from a PyTorch checkpoint
                 return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)

-        ret = model(model.dummy_inputs, training=False)  # build the network with dummy inputs
+        model(model.dummy_inputs, training=False)  # build the network with dummy inputs

         assert os.path.isfile(resolved_archive_file), "Error retrieving file {}".format(resolved_archive_file)

         # 'by_name' allow us to do transfer learning by skipping/adding layers

@@ -333,7 +333,7 @@ class TFPreTrainedModel(tf.keras.Model):
                 "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
             )

-        ret = model(model.dummy_inputs, training=False)  # Make sure restore ops are run
+        model(model.dummy_inputs, training=False)  # Make sure restore ops are run

         # Check if the models are the same to output loading informations
         with h5py.File(resolved_archive_file, "r") as f:

@@ -515,7 +515,7 @@ class TFSequenceSummary(tf.keras.layers.Layer):
             cls_index = inputs[1] if len(inputs) > 1 else None
             assert len(inputs) <= 2, "Too many inputs."
         else:
-            input_ids = inputs.get("input_ids")
+            hidden_states = inputs.get("hidden_states")
             cls_index = inputs.get("cls_index", None)

         if self.summary_type == "last":
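The first two hunks repeat the drop-the-unused-binding pattern from modeling_tf_pytorch_utils.py. The third fixes the dict branch of `TFSequenceSummary.call`, which looked up the wrong key: the layer summarizes hidden states, so it should read `"hidden_states"` (and optionally `"cls_index"`), not `"input_ids"`. A minimal sketch of that input convention, with a hypothetical helper and assumed shapes (not the library implementation):

    import tensorflow as tf

    def summarize_last(inputs):
        # hypothetical helper mirroring the dict/tensor handling in the hunk
        if isinstance(inputs, dict):
            hidden_states = inputs.get("hidden_states")   # was inputs.get("input_ids")
        else:
            hidden_states = inputs
        return hidden_states[:, -1]    # "last" summary: final sequence position

    hidden_states = tf.random.normal((2, 5, 8))           # (batch, seq_len, hidden)
    print(summarize_last({"hidden_states": hidden_states}).shape)   # (2, 8)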