Commit 3e7fe8a1 authored by miguelCalado

Fixed pylint and added docstrings

parent b219bbf4
@@ -36,7 +36,7 @@ from official.modeling import hyperparams
from official.modeling import performance
from official.utils import hyperparams_flags
from official.utils.misc import keras_utils
-from official.vision.image_classification.vgg16 import vgg_model
+from official.legacy.image_classification.vgg16 import vgg_model

def get_models() -> Mapping[str, tf.keras.Model]:
...
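For context, a minimal sketch of how the imported vgg_model is typically wired into get_models() in this kind of trainer. The body of get_models() is not shown in this diff, so the entries below (and the presence of other builders) are assumptions; Mapping and tf come from the same imports as in the surrounding file.

# Hypothetical illustration only; the real get_models() body is not part of this diff.
def get_models() -> Mapping[str, tf.keras.Model]:
  """Maps a model-type name to its Keras model builder."""
  return {
      'vgg16': vgg_model.vgg16,  # builder imported above (assumed registration)
      # Other builders (e.g. 'resnet', 'efficientnet') are assumed to be
      # registered here as well in the upstream trainer.
  }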
@@ -19,9 +19,8 @@ from __future__ import division
from __future__ import print_function

import dataclasses

+from official.legacy.image_classification.configs import base_configs
from official.modeling.hyperparams import base_config
-from official.vision.image_classification.configs import base_configs

@dataclasses.dataclass
...
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VGG16 model for Keras.
Adapted from tf.keras.applications.vgg16.VGG16().
Related papers/blogs:
- https://arxiv.org/abs/1409.1556
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

@@ -6,16 +28,30 @@ import tensorflow as tf

layers = tf.keras.layers


def _gen_l2_regularizer(use_l2_regularizer=True, l2_weight_decay=1e-4):
  return tf.keras.regularizers.L2(
      l2_weight_decay) if use_l2_regularizer else None
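As a quick aside (not part of the diff), a minimal sketch of what the helper above evaluates to and how the returned regularizer is consumed by the layers further down, assuming TensorFlow 2.x:

# Illustrative only; values mirror the helper's defaults.
import tensorflow as tf

reg = tf.keras.regularizers.L2(1e-4)   # what _gen_l2_regularizer(True) returns
layer = tf.keras.layers.Conv2D(64, (3, 3), kernel_regularizer=reg)
# With use_l2_regularizer=False the helper returns None, i.e. no weight decay.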
def vgg16(num_classes,
          batch_size=None,
          use_l2_regularizer=True,
          batch_norm_decay=0.9,
          batch_norm_epsilon=1e-5):
"""Instantiates the VGG16 architecture
Args:
num_classes: `int` number of classes for image classification.
batch_size: Size of the batches for each step.
use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer.
batch_norm_decay: Moment of batch norm layers.
batch_norm_epsilon: Epsilon of batch borm layers.
Returns:
A Keras model instance.
"""
  input_shape = (224, 224, 3)
  img_input = layers.Input(shape=input_shape, batch_size=batch_size)
@@ -26,9 +62,9 @@ def vgg16(num_classes,
    bn_axis = 1
  else:  # channels_last
    bn_axis = 3

  # Block 1
-  x = layers.Conv2D(64, (3, 3),
+  x = layers.Conv2D(
+      64, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block1_conv1')(x)
@@ -38,7 +74,8 @@ def vgg16(num_classes,
      epsilon=batch_norm_epsilon,
      name='bn_conv1')(x)
  x = layers.Activation('relu')(x)
-  x = layers.Conv2D(64, (3, 3),
+  x = layers.Conv2D(
+      64, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block1_conv2')(x)
@@ -51,7 +88,8 @@ def vgg16(num_classes,
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

  # Block 2
-  x = layers.Conv2D(128, (3, 3),
+  x = layers.Conv2D(
+      128, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block2_conv1')(x)
@@ -61,7 +99,8 @@ def vgg16(num_classes,
      epsilon=batch_norm_epsilon,
      name='bn_conv3')(x)
  x = layers.Activation('relu')(x)
-  x = layers.Conv2D(128, (3, 3),
+  x = layers.Conv2D(
+      128, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block2_conv2')(x)
@@ -74,7 +113,8 @@ def vgg16(num_classes,
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

  # Block 3
-  x = layers.Conv2D(256, (3, 3),
+  x = layers.Conv2D(
+      256, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block3_conv1')(x)
@@ -84,7 +124,8 @@ def vgg16(num_classes,
      epsilon=batch_norm_epsilon,
      name='bn_conv5')(x)
  x = layers.Activation('relu')(x)
-  x = layers.Conv2D(256, (3, 3),
+  x = layers.Conv2D(
+      256, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block3_conv2')(x)
@@ -94,7 +135,8 @@ def vgg16(num_classes,
      epsilon=batch_norm_epsilon,
      name='bn_conv6')(x)
  x = layers.Activation('relu')(x)
-  x = layers.Conv2D(256, (3, 3),
+  x = layers.Conv2D(
+      256, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block3_conv3')(x)
@@ -107,7 +149,8 @@ def vgg16(num_classes,
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

  # Block 4
-  x = layers.Conv2D(512, (3, 3),
+  x = layers.Conv2D(
+      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block4_conv1')(x)
@@ -117,7 +160,8 @@ def vgg16(num_classes,
      epsilon=batch_norm_epsilon,
      name='bn_conv8')(x)
  x = layers.Activation('relu')(x)
-  x = layers.Conv2D(512, (3, 3),
+  x = layers.Conv2D(
+      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block4_conv2')(x)
@@ -127,7 +171,8 @@ def vgg16(num_classes,
      epsilon=batch_norm_epsilon,
      name='bn_conv9')(x)
  x = layers.Activation('relu')(x)
-  x = layers.Conv2D(512, (3, 3),
+  x = layers.Conv2D(
+      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block4_conv3')(x)
@@ -140,7 +185,8 @@ def vgg16(num_classes,
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

  # Block 5
-  x = layers.Conv2D(512, (3, 3),
+  x = layers.Conv2D(
+      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block5_conv1')(x)
@@ -150,7 +196,8 @@ def vgg16(num_classes,
      epsilon=batch_norm_epsilon,
      name='bn_conv11')(x)
  x = layers.Activation('relu')(x)
-  x = layers.Conv2D(512, (3, 3),
+  x = layers.Conv2D(
+      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block5_conv2')(x)
@@ -160,7 +207,8 @@ def vgg16(num_classes,
      epsilon=batch_norm_epsilon,
      name='bn_conv12')(x)
  x = layers.Activation('relu')(x)
-  x = layers.Conv2D(512, (3, 3),
+  x = layers.Conv2D(
+      512, (3, 3),
      padding='same',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='block5_conv3')(x)
@@ -174,23 +222,17 @@ def vgg16(num_classes,
  x = layers.Flatten(name='flatten')(x)
  x = layers.Dense(4096,
-      #kernel_initializer=tf.initializers.random_normal(stddev=0.01),
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
-      #bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='fc1')(x)
  x = layers.Activation('relu')(x)
  x = layers.Dropout(0.5)(x)
  x = layers.Dense(4096,
-      #kernel_initializer=tf.initializers.random_normal(stddev=0.01),
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
-      #bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='fc2')(x)
  x = layers.Activation('relu')(x)
  x = layers.Dropout(0.5)(x)
  x = layers.Dense(num_classes,
-      #kernel_initializer=tf.initializers.random_normal(stddev=0.01),
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
-      #bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='fc1000')(x)

  # A softmax that is followed by the model loss must be done cannot be done
@@ -199,4 +241,3 @@ def vgg16(num_classes,
  # Create model.
  return tf.keras.Model(img_input, x, name='vgg16')
\ No newline at end of file
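To round out the picture, a minimal sketch of how the updated module might be exercised after this change. The import path follows the new official.legacy location shown above; the batch size, optimizer, and loss settings are illustrative assumptions, not part of the commit.

# Illustrative usage only; the settings below are assumptions.
import tensorflow as tf
from official.legacy.image_classification.vgg16 import vgg_model

model = vgg_model.vgg16(num_classes=1000, batch_size=32)
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
    # The hunk above is cut off right where the final softmax is (or is not)
    # applied, so whether from_logits=True is needed depends on the full file.
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=['accuracy'])
model.summary()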