ModelZoo / ResNet50_tensorflow · Commits

Commit 3e7fe8a1
Authored Jan 08, 2022 by miguelCalado
Fixed pylint and added docstrings
Parent: b219bbf4

Showing 3 changed files with 103 additions and 63 deletions:

  official/legacy/image_classification/classifier_trainer.py   (+1, -1)
  official/legacy/image_classification/vgg16/vgg_config.py     (+1, -2)
  official/legacy/image_classification/vgg16/vgg_model.py      (+101, -60)
official/legacy/image_classification/classifier_trainer.py

@@ -36,7 +36,7 @@ from official.modeling import hyperparams
 from official.modeling import performance
 from official.utils import hyperparams_flags
 from official.utils.misc import keras_utils
-from official.vision.image_classification.vgg16 import vgg_model
+from official.legacy.image_classification.vgg16 import vgg_model


 def get_models() -> Mapping[str, tf.keras.Model]:
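Only the vgg_model import changes in this hunk (official.vision becomes official.legacy); the registry that follows is unchanged and its entries are not shown here. As a rough, hypothetical sketch of how such a name-to-builder mapping typically looks once the legacy import is in place (the dictionary contents below are assumptions, not part of this commit):

from typing import Mapping

import tensorflow as tf

from official.legacy.image_classification.vgg16 import vgg_model


def get_models() -> Mapping[str, tf.keras.Model]:
  """Illustrative only: maps model names to their builder callables."""
  return {
      'vgg16': vgg_model.vgg16,  # assumed entry wiring up the VGG16 builder
  }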
official/legacy/image_classification/vgg16/vgg_config.py

@@ -19,9 +19,8 @@ from __future__ import division
 from __future__ import print_function

 import dataclasses

+from official.legacy.image_classification.configs import base_configs
 from official.modeling.hyperparams import base_config
-from official.vision.image_classification.configs import base_configs


 @dataclasses.dataclass
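The change here is again just the base_configs import moving to the legacy package. For context, configs in this package are plain dataclasses layered on the hyperparams base classes; a hypothetical sketch of such a config (class and field names are illustrative, not taken from this file):

import dataclasses

from official.legacy.image_classification.configs import base_configs


@dataclasses.dataclass
class VGG16ModelConfig(base_configs.ModelConfig):
  """Illustrative VGG16 model config; fields are assumptions, not the file's."""
  name: str = 'VGG16'
  num_classes: int = 1000
  use_l2_regularizer: bool = True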
official/legacy/image_classification/vgg16/vgg_model.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VGG16 model for Keras.
Adapted from tf.keras.applications.vgg16.VGG16().
Related papers/blogs:
- https://arxiv.org/abs/1409.1556
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -6,16 +28,30 @@ import tensorflow as tf

layers = tf.keras.layers


def _gen_l2_regularizer(use_l2_regularizer=True, l2_weight_decay=1e-4):
  return tf.keras.regularizers.L2(
      l2_weight_decay) if use_l2_regularizer else None
def vgg16(num_classes,
          batch_size=None,
          use_l2_regularizer=True,
          batch_norm_decay=0.9,
          batch_norm_epsilon=1e-5):
  """Instantiates the VGG16 architecture.

  Args:
    num_classes: `int` number of classes for image classification.
    batch_size: Size of the batches for each step.
    use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layers.
    batch_norm_decay: Momentum of the batch norm layers.
    batch_norm_epsilon: Epsilon of the batch norm layers.

  Returns:
    A Keras model instance.
  """
  input_shape = (224, 224, 3)
  img_input = layers.Input(shape=input_shape, batch_size=batch_size)
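For reference, the _gen_l2_regularizer helper above returns either a Keras L2 regularizer with the given weight decay or None, so its result can be passed straight to kernel_regularizer. A minimal illustration, assuming the definitions above are in scope (not part of the diff):

reg = _gen_l2_regularizer(use_l2_regularizer=True)       # tf.keras.regularizers.L2(1e-4)
no_reg = _gen_l2_regularizer(use_l2_regularizer=False)   # None: layer gets no weight penalty
layer = tf.keras.layers.Conv2D(64, (3, 3), kernel_regularizer=reg)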
@@ -26,22 +62,23 @@ def vgg16(num_classes,
    bn_axis = 1
  else:  # channels_last
    bn_axis = 3

  # Block 1
  x = layers.Conv2D(
      64, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block1_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv1')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(
      64, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block1_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
@@ -51,20 +88,22 @@ def vgg16(num_classes,
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

  # Block 2
  x = layers.Conv2D(
      128, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block2_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv3')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(
      128, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block2_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
@@ -74,30 +113,33 @@ def vgg16(num_classes,
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

  # Block 3
  x = layers.Conv2D(
      256, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block3_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv5')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(
      256, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block3_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv6')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(
      256, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block3_conv3')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
@@ -107,30 +149,33 @@ def vgg16(num_classes,
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

  # Block 4
  x = layers.Conv2D(
      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block4_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv8')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(
      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block4_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv9')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(
      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block4_conv3')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
@@ -140,30 +185,33 @@ def vgg16(num_classes,
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

  # Block 5
  x = layers.Conv2D(
      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block5_conv1')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv11')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(
      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block5_conv2')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      name='bn_conv12')(x)
  x = layers.Activation('relu')(x)
  x = layers.Conv2D(
      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block5_conv3')(x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=batch_norm_decay,
@@ -174,23 +222,17 @@ def vgg16(num_classes,
  x = layers.Flatten(name='flatten')(x)
  x = layers.Dense(4096,
                   kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                   name='fc1')(x)
  x = layers.Activation('relu')(x)
  x = layers.Dropout(0.5)(x)
  x = layers.Dense(4096,
                   kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                   name='fc2')(x)
  x = layers.Activation('relu')(x)
  x = layers.Dropout(0.5)(x)
  x = layers.Dense(num_classes,
                   kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
                   name='fc1000')(x)

  # A softmax that is followed by the model loss must be done cannot be done
@@ -199,4 +241,3 @@ def vgg16(num_classes,
  # Create model.
  return tf.keras.Model(img_input, x, name='vgg16')
\ No newline at end of file
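A short usage sketch for the builder defined in this file (illustrative only; the class count, optimizer, and loss below are assumptions, not part of this commit):

import tensorflow as tf

from official.legacy.image_classification.vgg16 import vgg_model

# Build VGG16 with batch normalization for 1000-way classification of
# 224x224 RGB images, then compile it for training.
model = vgg_model.vgg16(num_classes=1000, use_l2_regularizer=True)
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'])
model.summary()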