ModelZoo / ResNet50_tensorflow · Commits

Commit 4053c2ba, authored Mar 22, 2022 by saberkun

Merge pull request #10514 from ZihanWangKi:master

PiperOrigin-RevId: 436548878

Parents: f9f2765d, 259c4347
Showing 20 changed files with 2041 additions and 12 deletions.
official/legacy/image_classification/callbacks.py (+0 −1)
official/legacy/image_classification/classifier_trainer.py (+0 −1)
official/legacy/image_classification/classifier_trainer_test.py (+0 −1)
official/legacy/image_classification/classifier_trainer_util_test.py (+0 −1)
official/legacy/image_classification/dataset_factory.py (+0 −1)
official/legacy/image_classification/learning_rate.py (+0 −1)
official/projects/assemblenet/modeling/assemblenet.py (+0 −1)
official/projects/assemblenet/modeling/rep_flow_2d_layer.py (+0 −1)
official/projects/deepmac_maskrcnn/serving/detection.py (+0 −1)
official/projects/deepmac_maskrcnn/serving/detection_test.py (+0 −1)
official/projects/edgetpu/vision/tasks/image_classification_test.py (+0 −1)
official/projects/edgetpu/vision/tasks/semantic_segmentation_test.py (+0 −1)
official/projects/longformer/README.md (+50 −0)
official/projects/longformer/experiments/glue_mnli.yaml (+47 −0)
official/projects/longformer/experiments/glue_mnli_allenai.yaml (+48 −0)
official/projects/longformer/experiments/pretraining_512.yaml (+74 −0)
official/projects/longformer/longformer.py (+69 −0)
official/projects/longformer/longformer_attention.py (+1082 −0)
official/projects/longformer/longformer_attention_test.py (+306 −0)
official/projects/longformer/longformer_encoder.py (+365 −0)
official/legacy/image_classification/callbacks.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Common modules for callbacks."""
 from __future__ import absolute_import
 from __future__ import division

official/legacy/image_classification/classifier_trainer.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Runs an Image Classification model."""
 import os

official/legacy/image_classification/classifier_trainer_test.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Unit tests for the classifier trainer models."""
 import functools

official/legacy/image_classification/classifier_trainer_util_test.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Unit tests for the classifier trainer models."""
 from __future__ import absolute_import

official/legacy/image_classification/dataset_factory.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Dataset utilities for vision tasks using TFDS and tf.data.Dataset."""
 from __future__ import absolute_import
 from __future__ import division

official/legacy/image_classification/learning_rate.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Learning rate utilities for vision tasks."""
 from __future__ import absolute_import
 from __future__ import division

official/projects/assemblenet/modeling/assemblenet.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Contains definitions for the AssembleNet [1] models.
 Requires the AssembleNet architecture to be specified in

official/projects/assemblenet/modeling/rep_flow_2d_layer.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Contains definitions for 'Representation Flow' layer [1].
 Representation flow layer is a generalization of optical flow extraction; the

official/projects/deepmac_maskrcnn/serving/detection.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Detection input and model functions for serving/inference."""
 from typing import Dict, Mapping, Text

official/projects/deepmac_maskrcnn/serving/detection_test.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Test for image detection export lib."""
 import io

official/projects/edgetpu/vision/tasks/image_classification_test.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Tests for image classification task."""
 # pylint: disable=unused-import

official/projects/edgetpu/vision/tasks/semantic_segmentation_test.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Tests for semantic segmentation task."""
 # pylint: disable=unused-import

official/projects/longformer/README.md (new file, mode 100644)

# Longformer: The Long-Document Transformer

## Modifications from Huggingface's Implementation

All models require a `global_attention_size` specified in the config, which sets global attention for the first `global_attention_size` tokens of every sentence. Different global attention sizes for individual sentences are not supported. This restriction allows running on TPUs, where tensor sizes have to be statically determined.

`_get_global_attn_indices` in `longformer_attention.py` shows how the new global attention indices are computed. All `tf.cond` calls were changed to plain `if` conditions, since the global attention span is now fixed at the start.
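The sketch below is editorial, not the repository's `_get_global_attn_indices`: it only illustrates why a fixed `global_attention_size` avoids dynamic shapes on TPUs, since the global tokens are simply the first `G` positions of every example. The helper name `fixed_prefix_global_attn_mask` is hypothetical.

```python
# Editorial sketch: with a fixed global_attention_size G, the global-token
# mask can be built with static shapes and plain Python control flow
# (no tf.cond), which is what TPU compilation requires.
import tensorflow as tf


def fixed_prefix_global_attn_mask(batch_size, seq_len, global_attention_size):
  """Returns a [batch_size, seq_len] bool mask, True for global tokens."""
  positions = tf.range(seq_len)[tf.newaxis, :]      # [1, seq_len]
  is_global = positions < global_attention_size     # [1, seq_len]
  return tf.tile(is_global, [batch_size, 1])        # [batch_size, seq_len]


print(fixed_prefix_global_attn_mask(2, 6, 2))
# [[ True  True False False False False]
#  [ True  True False False False False]]
```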
To load weights from a pre-trained Huggingface Longformer, run `utils/convert_pretrained_pytorch_checkpoint_to_tf.py` to create a checkpoint. There is also a `utils/longformer_tokenizer_to_tfrecord.py` that converts PyTorch Longformer tokenized data to tf_records.
## Steps to Fine-tune on MNLI

#### Prepare the pre-trained checkpoint

Option 1. Use our saved checkpoint of `allenai/longformer-base-4096` stored in cloud storage:

```bash
gsutil cp -r gs://model-garden-ucsd-zihan/longformer-4096 .
```

Option 2. Create it directly:

```bash
python3 utils/convert_pretrained_pytorch_checkpoint_to_tf.py
```

#### [Optional] Prepare the input file

```bash
python3 longformer_tokenizer_to_tfrecord.py
```

#### Training

Here we use the MNLI training data that was uploaded to cloud storage; you can replace it with the input files you generated.

```bash
TRAIN_DATA=task.train_data.input_path=gs://model-garden-ucsd-zihan/longformer_allenai_mnli_train.tf_record,task.validation_data.input_path=gs://model-garden-ucsd-zihan/longformer_allenai_mnli_eval.tf_record
INIT_CHECKPOINT=longformer-4096/longformer
PYTHONPATH=/path/to/model/garden \
python3 train.py \
  --experiment=longformer/glue \
  --config_file=experiments/glue_mnli_allenai.yaml \
  --params_override="${TRAIN_DATA},runtime.distribution_strategy=tpu,task.init_checkpoint=${INIT_CHECKPOINT}" \
  --tpu=local \
  --model_dir=/path/to/outputdir \
  --mode=train_and_eval
```

This should take ~3 hours to run and give an MNLI accuracy of ~86.
official/projects/longformer/experiments/glue_mnli.yaml (new file, mode 100644)

task:
  hub_module_url: ''
  model:
    num_classes: 3
    encoder:
      type: any
      any:
        max_position_embeddings: 512
        attention_window: [32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32]
        global_attention_size: 1
  metric_type: 'accuracy'
  train_data:
    drop_remainder: true
    global_batch_size: 32
    input_path: TODO
    is_training: true
    seq_length: 128
  validation_data:
    drop_remainder: true
    global_batch_size: 32
    input_path: TODO
    is_training: false
    seq_length: 128
trainer:
  checkpoint_interval: 1000
  continuous_eval_timeout: 7200
  optimizer_config:
    learning_rate:
      polynomial:
        decay_steps: 61359
        end_learning_rate: 0.0
        initial_learning_rate: 3.0e-05
        power: 1.0
      type: polynomial
    optimizer:
      type: adamw
    warmup:
      polynomial:
        power: 1
        warmup_steps: 6136
      type: polynomial
  steps_per_loop: 100
  summary_interval: 100
  # Training data size 392,702 examples, 5 epochs.
  train_steps: 61359
  validation_interval: 2000
  validation_steps: 307
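An editorial sanity check (not part of the commit) of the trainer schedule above: 392,702 MNLI training examples for 5 epochs at global batch size 32 yields the 61,359 train steps used in both GLUE configs, with warmup_steps set to roughly 10% of that.

```python
# Editorial arithmetic check of train_steps / warmup_steps in the MNLI configs.
train_examples = 392_702
epochs = 5
global_batch_size = 32

train_steps = train_examples * epochs // global_batch_size
print(train_steps)               # 61359  -> train_steps / decay_steps above

print(round(0.1 * train_steps))  # 6136   -> warmup_steps (~10% warmup)
```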
official/projects/longformer/experiments/glue_mnli_allenai.yaml (new file, mode 100644)

task:
  hub_module_url: ''
  model:
    num_classes: 3
    encoder:
      type: any
      any:
        max_position_embeddings: 4098
        attention_window: [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128]
        global_attention_size: 1
        vocab_size: 50265
  metric_type: 'accuracy'
  train_data:
    drop_remainder: true
    global_batch_size: 32
    input_path: TODO
    is_training: true
    seq_length: 512
  validation_data:
    drop_remainder: true
    global_batch_size: 32
    input_path: TODO
    is_training: false
    seq_length: 512
trainer:
  checkpoint_interval: 1000
  continuous_eval_timeout: 7200
  optimizer_config:
    learning_rate:
      polynomial:
        decay_steps: 61359
        end_learning_rate: 0.0
        initial_learning_rate: 3.0e-05
        power: 1.0
      type: polynomial
    optimizer:
      type: adamw
    warmup:
      polynomial:
        power: 1
        warmup_steps: 6136
      type: polynomial
  steps_per_loop: 1000
  summary_interval: 1000
  # Training data size 392,702 examples, 5 epochs.
  train_steps: 61359
  validation_interval: 2000
  validation_steps: 307
official/projects/longformer/experiments/pretraining_512.yaml (new file, mode 100644)

This diff is collapsed and not shown here.
official/projects/longformer/longformer.py (new file, mode 100644)

# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Longformer model configurations and instantiation methods."""
import dataclasses
from typing import List

import tensorflow as tf

from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.longformer.longformer_encoder import LongformerEncoder


@dataclasses.dataclass
class LongformerEncoderConfig(encoders.BertEncoderConfig):
  """Extra parameters for Longformer configs.

  Attributes:
    attention_window: list of ints representing the window size for each layer.
    global_attention_size: the size of global attention used for each token.
    pad_token_id: the token id for the pad token.
  """
  attention_window: List[int] = dataclasses.field(default_factory=list)
  global_attention_size: int = 0
  pad_token_id: int = 1


@base_config.bind(LongformerEncoderConfig)
def get_encoder(encoder_cfg: LongformerEncoderConfig):
  """Gets a 'LongformerEncoder' object.

  Args:
    encoder_cfg: A 'LongformerEncoderConfig'.

  Returns:
    An encoder object.
  """
  encoder = LongformerEncoder(
      attention_window=encoder_cfg.attention_window,
      global_attention_size=encoder_cfg.global_attention_size,
      vocab_size=encoder_cfg.vocab_size,
      hidden_size=encoder_cfg.hidden_size,
      num_layers=encoder_cfg.num_layers,
      num_attention_heads=encoder_cfg.num_attention_heads,
      inner_dim=encoder_cfg.intermediate_size,
      inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
      output_dropout=encoder_cfg.dropout_rate,
      attention_dropout=encoder_cfg.attention_dropout_rate,
      max_sequence_length=encoder_cfg.max_position_embeddings,
      type_vocab_size=encoder_cfg.type_vocab_size,
      initializer=tf.keras.initializers.TruncatedNormal(
          stddev=encoder_cfg.initializer_range),
      output_range=encoder_cfg.output_range,
      embedding_width=encoder_cfg.embedding_size,
      norm_first=encoder_cfg.norm_first)
  return encoder
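A usage note (editorial, not part of the commit): the encoder section of `experiments/glue_mnli_allenai.yaml` above corresponds roughly to the following `LongformerEncoderConfig`; fields that are not set keep their `BertEncoderConfig` defaults, and `get_encoder` defined above performs the config-to-`LongformerEncoder` mapping.

```python
# Editorial sketch: a LongformerEncoderConfig mirroring the allenai GLUE config.
from official.projects.longformer import longformer

encoder_cfg = longformer.LongformerEncoderConfig(
    vocab_size=50265,                # RoBERTa/Longformer vocabulary
    max_position_embeddings=4098,
    attention_window=[128] * 12,     # one window size per transformer layer
    global_attention_size=1)         # global attention on the leading token

# get_encoder(encoder_cfg) above then maps these fields onto the
# LongformerEncoder constructor arguments.
```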
official/projects/longformer/longformer_attention.py (new file, mode 100644)

This diff is collapsed and not shown here.
official/projects/longformer/longformer_attention_test.py (new file, mode 100644)

# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for official.nlp.projects.longformer.longformer_attention."""
import numpy as np
import tensorflow as tf

from official.modeling.tf_utils import get_shape_list
from official.projects.longformer import longformer_attention


def _create_mock_attention_data(num_heads,
                                key_dim,
                                value_dim,
                                q_seq_length,
                                kv_seq_length,
                                batch_size,
                                include_mask=False):
  """Creates mock testing data.

  Args:
    num_heads: `int`, Number of attention heads.
    key_dim: `int`, Size of query head.
    value_dim: `int`, Size of key, value dim.
    q_seq_length: `int`, query sequence length of the input.
    kv_seq_length: `int`, key, value sequence length of the input.
    batch_size: `int`, the batch size.
    include_mask: optional `bool`, whether or not to include mask data.

  Returns:
    A dictionary with `str` as keys and `Tensor` as values.
  """
  query_shape = (batch_size, q_seq_length, key_dim)
  value_shape = (batch_size, kv_seq_length, value_dim)

  data = dict(
      query=tf.random.normal(shape=query_shape),
      value=tf.random.normal(shape=value_shape),
      key=tf.random.normal(shape=value_shape))

  total_seq_length = kv_seq_length

  if include_mask:
    mask_shape = (batch_size, num_heads, q_seq_length, total_seq_length)
    mask_data = np.random.randint(2, size=mask_shape).astype('float32')
    mask_data = dict(attention_mask=mask_data)
    data.update(mask_data)

  return data


class LongformerAttentionTest(tf.test.TestCase):

  def setUp(self):
    super(LongformerAttentionTest, self).setUp()
    np.random.seed(0)
    tf.random.set_seed(0)

  def _get_hidden_states(self):
    return tf.convert_to_tensor(
        [[
            [
                4.98332758e-01,
                2.69175139e00,
                -7.08081422e-03,
                1.04915401e00,
                -1.83476661e00,
                7.67220476e-01,
                2.98580543e-01,
                2.84803992e-02,
            ],
            [
                -7.58357372e-01,
                4.20635998e-01,
                -4.04739919e-02,
                1.59924145e-01,
                2.05135748e00,
                -1.15997978e00,
                5.37166397e-01,
                2.62873606e-01,
            ],
            [
                -1.69438001e00,
                4.17574660e-01,
                -1.49196962e00,
                -1.76483717e00,
                -1.94566312e-01,
                -1.71183858e00,
                7.72903565e-01,
                -1.11557056e00,
            ],
            [
                5.44028163e-01,
                2.05466114e-01,
                -3.63045868e-01,
                2.41865062e-01,
                3.20348382e-01,
                -9.05611176e-01,
                -1.92690727e-01,
                -1.19917547e00,
            ],
        ]],
        dtype=tf.float32,
    )

  def test_diagonalize(self):
    hidden_states = self._get_hidden_states()
    # set seq length = 8, hidden dim = 4
    hidden_states = tf.reshape(hidden_states, (1, 8, 4))
    chunked_hidden_states = longformer_attention.LongformerAttention._chunk(
        hidden_states, window_overlap=2)
    window_overlap_size = get_shape_list(chunked_hidden_states)[2]
    self.assertEqual(window_overlap_size, 4)

    padded_hidden_states = longformer_attention.LongformerAttention._pad_and_diagonalize(
        chunked_hidden_states)

    self.assertEqual(
        get_shape_list(padded_hidden_states)[-1],
        get_shape_list(chunked_hidden_states)[-1] + window_overlap_size - 1)

    # first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000]
    tf.debugging.assert_near(
        padded_hidden_states[0, 0, 0, :4],
        chunked_hidden_states[0, 0, 0],
        rtol=1e-3)
    tf.debugging.assert_near(
        padded_hidden_states[0, 0, 0, 4:],
        tf.zeros((3,), dtype=tf.dtypes.float32),
        rtol=1e-3)
    # last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629]
    tf.debugging.assert_near(
        padded_hidden_states[0, 0, -1, 3:],
        chunked_hidden_states[0, 0, -1],
        rtol=1e-3)
    tf.debugging.assert_near(
        padded_hidden_states[0, 0, -1, :3],
        tf.zeros((3,), dtype=tf.dtypes.float32),
        rtol=1e-3)

  def test_pad_and_transpose_last_two_dims(self):
    hidden_states = self._get_hidden_states()
    self.assertTrue(get_shape_list(hidden_states), [1, 8, 4])

    # pad along seq length dim
    paddings = tf.constant([[0, 0], [0, 0], [0, 1], [0, 0]],
                           dtype=tf.dtypes.int32)

    hidden_states = longformer_attention.LongformerAttention._chunk(
        hidden_states, window_overlap=2)
    padded_hidden_states = longformer_attention.LongformerAttention._pad_and_transpose_last_two_dims(
        hidden_states, paddings)
    self.assertEqual(get_shape_list(padded_hidden_states), [1, 1, 8, 5])

    expected_added_dim = tf.zeros((5,), dtype=tf.dtypes.float32)
    tf.debugging.assert_near(
        expected_added_dim, padded_hidden_states[0, 0, -1, :], rtol=1e-6)
    tf.debugging.assert_near(
        hidden_states[0, 0, -1, :],
        tf.reshape(padded_hidden_states, (1, -1))[0, 24:32],
        rtol=1e-6)

  def test_mask_invalid_locations(self):
    hidden_states = self._get_hidden_states()
    batch_size = 1
    seq_length = 8
    hidden_size = 4
    hidden_states = tf.reshape(hidden_states,
                               (batch_size, seq_length, hidden_size))
    hidden_states = longformer_attention.LongformerAttention._chunk(
        hidden_states, window_overlap=2)

    hid_states_1 = longformer_attention.LongformerAttention._mask_invalid_locations(
        hidden_states, 1)
    hid_states_2 = longformer_attention.LongformerAttention._mask_invalid_locations(
        hidden_states, 2)
    hid_states_3 = longformer_attention.LongformerAttention._mask_invalid_locations(
        hidden_states[:, :, :, :3], 2)
    hid_states_4 = longformer_attention.LongformerAttention._mask_invalid_locations(
        hidden_states[:, :, 2:, :], 2)

    self.assertEqual(
        tf.math.reduce_sum(
            tf.cast(tf.math.is_inf(hid_states_1), tf.dtypes.int32)), 8)
    self.assertEqual(
        tf.math.reduce_sum(
            tf.cast(tf.math.is_inf(hid_states_2), tf.dtypes.int32)), 24)
    self.assertEqual(
        tf.math.reduce_sum(
            tf.cast(tf.math.is_inf(hid_states_3), tf.dtypes.int32)), 24)
    self.assertEqual(
        tf.math.reduce_sum(
            tf.cast(tf.math.is_inf(hid_states_4), tf.dtypes.int32)), 12)

  def test_chunk(self):
    hidden_states = self._get_hidden_states()
    batch_size = 1
    seq_length = 8
    hidden_size = 4
    hidden_states = tf.reshape(hidden_states,
                               (batch_size, seq_length, hidden_size))

    chunked_hidden_states = longformer_attention.LongformerAttention._chunk(
        hidden_states, window_overlap=2)

    # expected slices across chunk and seq length dim
    expected_slice_along_seq_length = tf.convert_to_tensor(
        [0.4983, -0.7584, -1.6944], dtype=tf.dtypes.float32)
    expected_slice_along_chunk = tf.convert_to_tensor(
        [0.4983, -1.8348, -0.7584, 2.0514], dtype=tf.dtypes.float32)

    self.assertEqual(get_shape_list(chunked_hidden_states), [1, 3, 4, 4])
    tf.debugging.assert_near(
        chunked_hidden_states[0, :, 0, 0],
        expected_slice_along_seq_length,
        rtol=1e-3)
    tf.debugging.assert_near(
        chunked_hidden_states[0, 0, :, 0],
        expected_slice_along_chunk,
        rtol=1e-3)

  def test_layer_local_attn(self):
    hidden_states = self._get_hidden_states()
    batch_size, seq_length, _ = hidden_states.shape
    layer = longformer_attention.LongformerAttention(
        num_heads=2,
        key_dim=4,
        value_dim=4,
        layer_id=0,
        attention_window=4,
        global_attention_size=0,
    )

    attention_mask = tf.zeros((batch_size, seq_length),
                              dtype=tf.dtypes.float32)
    is_index_global_attn = tf.math.greater(attention_mask, 1)

    attention_mask = tf.where(
        tf.range(4)[None, :, None, None] > 1, -10000.0,
        attention_mask[:, :, None, None])
    is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0)

    output_hidden_states = layer(
        hidden_states=hidden_states,
        attention_mask=attention_mask,
        is_index_masked=is_index_masked,
        is_index_global_attn=is_index_global_attn,
    )[0]

    self.assertTrue(output_hidden_states.shape, (1, 4, 8))

  def test_layer_global_attn(self):
    layer = longformer_attention.LongformerAttention(
        num_heads=2,
        key_dim=4,
        value_dim=4,
        layer_id=0,
        attention_window=4,
        global_attention_size=1,
    )
    hidden_states = self._get_hidden_states()

    hidden_states = tf.concat(
        [self._get_hidden_states(), self._get_hidden_states() - 0.5], axis=0)
    _, seq_length, _ = hidden_states.shape

    # create attn mask
    attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.dtypes.float32)
    attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.dtypes.float32)

    attention_mask_1 = tf.where(
        tf.range(4)[None, :, None, None] == 0, 10000.0, attention_mask_1)
    attention_mask_1 = tf.where(
        tf.range(4)[None, :, None, None] > 2, -10000.0, attention_mask_1)
    attention_mask_2 = tf.where(
        tf.range(4)[None, :, None, None] == 0, 10000.0, attention_mask_2)
    attention_mask = tf.concat([attention_mask_1, attention_mask_2], axis=0)

    is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0)
    is_index_global_attn = tf.math.greater(attention_mask[:, :, 0, 0], 0)

    output_hidden_states = layer(
        hidden_states=hidden_states,
        attention_mask=-tf.math.abs(attention_mask),
        is_index_masked=is_index_masked,
        is_index_global_attn=is_index_global_attn,
    )[0]

    self.assertTrue(output_hidden_states.shape, (2, 4, 8))


if __name__ == '__main__':
  tf.test.main()
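Several tests above exercise `LongformerAttention._chunk`, which splits the sequence into overlapping blocks of length `2 * window_overlap` with a hop of `window_overlap`. The sketch below is editorial (not the repository's `_chunk` implementation) and only reproduces the shape behaviour the tests assert, e.g. `(1, 8, 4)` to `(1, 3, 4, 4)` for `window_overlap=2`.

```python
# Editorial sketch of the overlapping chunking exercised by test_chunk and
# test_diagonalize; the real LongformerAttention._chunk may differ internally.
import tensorflow as tf


def chunk(hidden_states: tf.Tensor, window_overlap: int) -> tf.Tensor:
  """[batch, seq_len, dim] -> [batch, seq_len // overlap - 1, 2 * overlap, dim]."""
  return tf.signal.frame(
      hidden_states,
      frame_length=2 * window_overlap,
      frame_step=window_overlap,
      axis=1)


x = tf.reshape(tf.range(32, dtype=tf.float32), (1, 8, 4))
print(chunk(x, window_overlap=2).shape)  # (1, 3, 4, 4), as asserted above
```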
official/projects/longformer/longformer_encoder.py (new file, mode 100644)

# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Longformer encoder. Modified From huggingface/transformers."""
# pylint: disable=g-classes-have-attributes

from typing import Any, Callable, List, Optional, Union

from absl import logging
import tensorflow as tf

from official.modeling.tf_utils import get_shape_list
from official.nlp.modeling import layers
from official.projects.longformer.longformer_encoder_block import LongformerEncoderBlock

_Initializer = Union[str, tf.keras.initializers.Initializer]

_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)


class LongformerEncoder(tf.keras.layers.Layer):
  """LongformerEncoder.

  Args:
    vocab_size: The size of the token vocabulary.
    attention_window: list of ints representing the window size for each layer.
    global_attention_size: the size of global attention used for each token.
    pad_token_id: the token id for the pad token.
    hidden_size: The size of the transformer hidden layers.
    num_layers: The number of transformer layers.
    num_attention_heads: The number of attention heads for each transformer.
      The hidden size must be divisible by the number of attention heads.
    max_sequence_length: The maximum sequence length that this encoder can
      consume. If None, max_sequence_length uses the value from sequence
      length. This determines the variable shape for positional embeddings.
    type_vocab_size: The number of types that the 'type_ids' input can take.
    inner_dim: The output dimension of the first Dense layer in a two-layer
      feedforward network for each transformer.
    inner_activation: The activation for the first Dense layer in a two-layer
      feedforward network for each transformer.
    output_dropout: Dropout probability for the post-attention and output
      dropout.
    attention_dropout: The dropout rate to use for the attention layers within
      the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
    output_range: The sequence output range, [0, output_range), by slicing the
      target sequence of the last transformer layer. `None` means the entire
      target sequence will attend to the source sequence, which yields the
      full output.
    embedding_width: The width of the word embeddings. If the embedding width
      is not equal to hidden size, embedding parameters will be factorized
      into two matrices in the shape of ['vocab_size', 'embedding_width'] and
      ['embedding_width', 'hidden_size'] ('embedding_width' is usually much
      smaller than 'hidden_size').
    embedding_layer: An optional Layer instance which will be called to
      generate embeddings for the input word IDs.
    norm_first: Whether to normalize inputs to attention and intermediate
      dense layers. If set False, output of attention and intermediate dense
      layers is normalized.
  """

  def __init__(
      self,
      vocab_size: int,
      attention_window: Union[List[int], int] = 512,
      global_attention_size: int = 0,
      pad_token_id: int = 1,
      hidden_size: int = 768,
      num_layers: int = 12,
      num_attention_heads: int = 12,
      max_sequence_length: int = 512,
      type_vocab_size: int = 16,
      inner_dim: int = 3072,
      inner_activation: Callable[..., Any] = _approx_gelu,
      output_dropout: float = 0.1,
      attention_dropout: float = 0.1,
      initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
          stddev=0.02),
      output_range: Optional[int] = None,
      embedding_width: Optional[int] = None,
      embedding_layer: Optional[tf.keras.layers.Layer] = None,
      norm_first: bool = False,
      **kwargs):
    super().__init__(**kwargs)
    # Longformer args
    self._attention_window = attention_window
    self._global_attention_size = global_attention_size
    self._pad_token_id = pad_token_id

    activation = tf.keras.activations.get(inner_activation)
    initializer = tf.keras.initializers.get(initializer)

    if embedding_width is None:
      embedding_width = hidden_size

    if embedding_layer is None:
      self._embedding_layer = layers.OnDeviceEmbedding(
          vocab_size=vocab_size,
          embedding_width=embedding_width,
          initializer=initializer,
          name='word_embeddings')
    else:
      self._embedding_layer = embedding_layer

    self._position_embedding_layer = layers.PositionEmbedding(
        initializer=initializer,
        max_length=max_sequence_length,
        name='position_embedding')

    self._type_embedding_layer = layers.OnDeviceEmbedding(
        vocab_size=type_vocab_size,
        embedding_width=embedding_width,
        initializer=initializer,
        use_one_hot=True,
        name='type_embeddings')

    self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
        name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)

    self._embedding_dropout = tf.keras.layers.Dropout(
        rate=output_dropout, name='embedding_dropout')

    # We project the 'embedding' output to 'hidden_size' if it is not already
    # 'hidden_size'.
    self._embedding_projection = None
    if embedding_width != hidden_size:
      self._embedding_projection = tf.keras.layers.experimental.EinsumDense(
          '...x,xy->...y',
          output_shape=hidden_size,
          bias_axes='y',
          kernel_initializer=initializer,
          name='embedding_projection')

    self._transformer_layers = []
    self._attention_mask_layer = layers.SelfAttentionMask(
        name='self_attention_mask')
    for i in range(num_layers):
      layer = LongformerEncoderBlock(
          global_attention_size=global_attention_size,
          num_attention_heads=num_attention_heads,
          inner_dim=inner_dim,
          inner_activation=inner_activation,
          attention_window=attention_window[i],
          layer_id=i,
          output_dropout=output_dropout,
          attention_dropout=attention_dropout,
          norm_first=norm_first,
          output_range=output_range if i == num_layers - 1 else None,
          kernel_initializer=initializer,
          name=f'transformer/layer_{i}')
      self._transformer_layers.append(layer)

    self._pooler_layer = tf.keras.layers.Dense(
        units=hidden_size,
        activation='tanh',
        kernel_initializer=initializer,
        name='pooler_transform')

    self._config = {
        'vocab_size': vocab_size,
        'hidden_size': hidden_size,
        'num_layers': num_layers,
        'num_attention_heads': num_attention_heads,
        'max_sequence_length': max_sequence_length,
        'type_vocab_size': type_vocab_size,
        'inner_dim': inner_dim,
        'inner_activation': tf.keras.activations.serialize(activation),
        'output_dropout': output_dropout,
        'attention_dropout': attention_dropout,
        'initializer': tf.keras.initializers.serialize(initializer),
        'output_range': output_range,
        'embedding_width': embedding_width,
        'embedding_layer': embedding_layer,
        'norm_first': norm_first,
        'attention_window': attention_window,
        'global_attention_size': global_attention_size,
        'pad_token_id': pad_token_id,
    }
    self.inputs = dict(
        input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
        input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
        input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32))

  def call(self, inputs):
    word_embeddings = None
    if isinstance(inputs, dict):
      word_ids = inputs.get('input_word_ids')  # input_ids
      mask = inputs.get('input_mask')  # attention_mask
      type_ids = inputs.get('input_type_ids')  # token_type_ids
      word_embeddings = inputs.get('input_word_embeddings', None)  # input_embeds
    else:
      raise ValueError(f'Unexpected inputs type to {self.__class__}.')

    (
        padding_len,
        word_ids,
        mask,
        type_ids,
        word_embeddings,
    ) = self._pad_to_window_size(
        word_ids=word_ids,
        mask=mask,
        type_ids=type_ids,
        word_embeddings=word_embeddings,
        pad_token_id=self._pad_token_id)

    if word_embeddings is None:
      word_embeddings = self._embedding_layer(word_ids)

    # absolute position embeddings.
    position_embeddings = self._position_embedding_layer(word_embeddings)
    type_embeddings = self._type_embedding_layer(type_ids)

    embeddings = word_embeddings + position_embeddings + type_embeddings
    embeddings = self._embedding_norm_layer(embeddings)
    embeddings = self._embedding_dropout(embeddings)

    if self._embedding_projection is not None:
      embeddings = self._embedding_projection(embeddings)

    batch_size, seq_len = get_shape_list(mask)
    # create masks with fixed len global_attention_size
    mask = tf.transpose(
        tf.concat(
            values=[
                tf.ones((self._global_attention_size, batch_size), tf.int32) *
                2,
                tf.transpose(mask)[self._global_attention_size:]
            ],
            axis=0))
    is_index_masked = tf.math.less(mask, 1)
    is_index_global_attn = tf.transpose(
        tf.concat(
            values=[
                tf.ones((self._global_attention_size, batch_size), tf.bool),
                tf.zeros((seq_len - self._global_attention_size, batch_size),
                         tf.bool)
            ],
            axis=0))

    # Longformer
    attention_mask = mask
    extended_attention_mask = tf.reshape(
        attention_mask, (tf.shape(mask)[0], tf.shape(mask)[1], 1, 1))
    attention_mask = tf.cast(
        tf.math.abs(1 - extended_attention_mask), tf.dtypes.float32) * -10000.0

    encoder_outputs = []
    x = embeddings
    # TFLongformerEncoder
    for layer in self._transformer_layers:
      x = layer([x, attention_mask, is_index_masked, is_index_global_attn])
      encoder_outputs.append(x)

    last_encoder_output = encoder_outputs[-1]
    if padding_len > 0:
      last_encoder_output = last_encoder_output[:, :-padding_len]
    first_token_tensor = last_encoder_output[:, 0, :]
    pooled_output = self._pooler_layer(first_token_tensor)

    return dict(
        sequence_output=last_encoder_output,
        pooled_output=pooled_output,
        encoder_outputs=encoder_outputs)

  def get_embedding_table(self):
    return self._embedding_layer.embeddings

  def get_embedding_layer(self):
    return self._embedding_layer

  def get_config(self):
    return dict(self._config)

  @property
  def transformer_layers(self):
    """List of Transformer layers in the encoder."""
    return self._transformer_layers

  @property
  def pooler_layer(self):
    """The pooler dense layer after the transformer layers."""
    return self._pooler_layer

  @classmethod
  def from_config(cls, config, custom_objects=None):
    if 'embedding_layer' in config and config['embedding_layer'] is not None:
      warn_string = (
          'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
          'train this model, the embedding layer will no longer be shared. '
          'To work around this, load the model outside of the Keras API.')
      print('WARNING: ' + warn_string)
      logging.warn(warn_string)
    return cls(**config)

  def _pad_to_window_size(
      self,
      word_ids,
      mask,
      type_ids,
      word_embeddings,
      pad_token_id,
  ):
    # padding
    attention_window = max(self._attention_window)

    assert (attention_window % 2 == 0), (
        '`attention_window` should be an even value. '
        f'Given {attention_window}')

    input_shape = get_shape_list(word_ids) if word_ids is not None else (
        get_shape_list(word_embeddings))
    batch_size, seq_len = input_shape[:2]
    if seq_len is not None:
      padding_len = (attention_window -
                     seq_len % attention_window) % attention_window
    else:
      padding_len = 0

    paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]])

    if word_ids is not None:
      word_ids = tf.pad(word_ids, paddings, constant_values=pad_token_id)

    if word_embeddings is not None:

      def pad_embeddings():
        word_ids_padding = tf.fill((batch_size, padding_len),
                                   self._pad_token_id)
        word_embeddings_padding = self._embedding_layer(word_ids_padding)
        return tf.concat([word_embeddings, word_embeddings_padding], axis=-2)

      word_embeddings = tf.cond(
          tf.math.greater(padding_len, 0), pad_embeddings,
          lambda: word_embeddings)

    mask = tf.pad(
        mask, paddings,
        constant_values=False)  # no attention on the padding tokens
    token_type_ids = tf.pad(
        type_ids, paddings, constant_values=0)  # pad with token_type_id = 0

    return (
        padding_len,
        word_ids,
        mask,
        token_type_ids,
        word_embeddings,
    )
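A minimal end-to-end sketch of the encoder's input/output contract (editorial; the hyperparameter values are illustrative, and it assumes the collapsed `longformer_attention.py` and the `longformer_encoder_block` module it supports are importable):

```python
# Editorial sketch: tiny LongformerEncoder forward pass with dict inputs.
import tensorflow as tf
from official.projects.longformer.longformer_encoder import LongformerEncoder

encoder = LongformerEncoder(
    vocab_size=100,
    attention_window=[4, 4],   # one even window size per layer
    global_attention_size=1,   # global attention on the first token
    hidden_size=16,
    num_layers=2,
    num_attention_heads=2,
    max_sequence_length=16,
    inner_dim=32)

batch_size, seq_len = 2, 8     # seq_len already a multiple of the window size
inputs = dict(
    input_word_ids=tf.ones((batch_size, seq_len), dtype=tf.int32),
    input_mask=tf.ones((batch_size, seq_len), dtype=tf.int32),
    input_type_ids=tf.zeros((batch_size, seq_len), dtype=tf.int32))

outputs = encoder(inputs)
# outputs['sequence_output']: [batch_size, seq_len, hidden_size]
# outputs['pooled_output']:   [batch_size, hidden_size]
```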