Unverified commit 09d9656f authored by Srihari Humbarwadi, committed by GitHub

Merge branch 'panoptic-segmentation' into panoptic-deeplab-modeling

parents ac671306 49a5706c
......@@ -18,12 +18,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from official.vision.detection.modeling.architecture import fpn
from official.vision.detection.modeling.architecture import heads
from official.vision.detection.modeling.architecture import identity
from official.vision.detection.modeling.architecture import nn_ops
from official.vision.detection.modeling.architecture import resnet
from official.vision.detection.modeling.architecture import spinenet
from official.legacy.detection.modeling.architecture import fpn
from official.legacy.detection.modeling.architecture import heads
from official.legacy.detection.modeling.architecture import identity
from official.legacy.detection.modeling.architecture import nn_ops
from official.legacy.detection.modeling.architecture import resnet
from official.legacy.detection.modeling.architecture import spinenet
def norm_activation_generator(params):
......
......@@ -28,8 +28,8 @@ import functools
import tensorflow as tf
from official.vision.detection.modeling.architecture import nn_ops
from official.vision.detection.ops import spatial_transform_ops
from official.legacy.detection.modeling.architecture import nn_ops
from official.legacy.detection.ops import spatial_transform_ops
class Fpn(object):
......@@ -52,6 +52,7 @@ class Fpn(object):
fpn_feat_dims: `int` number of filters in FPN layers.
use_separable_conv: `bool`, if True use separable convolution for
convolution in FPN layers.
activation: the activation function.
use_batch_norm: 'bool', indicating whether batchnorm layers are added.
norm_activation: an operation that includes a normalization layer
followed by an optional activation layer.
......
......@@ -23,8 +23,8 @@ import functools
import numpy as np
import tensorflow as tf
from official.vision.detection.modeling.architecture import nn_ops
from official.vision.detection.ops import spatial_transform_ops
from official.legacy.detection.modeling.architecture import nn_ops
from official.legacy.detection.ops import spatial_transform_ops
class RpnHead(tf.keras.layers.Layer):
......
......@@ -23,7 +23,6 @@ import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResidualBlock(tf.keras.layers.Layer):
"""A residual block."""
......@@ -163,7 +162,6 @@ class ResidualBlock(tf.keras.layers.Layer):
return self._activation_fn(x + shortcut)
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlock(tf.keras.layers.Layer):
"""A standard bottleneck block."""
......
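
The two hunks above drop the `tf.keras.utils.register_keras_serializable(package='Vision')` decorator from `ResidualBlock` and `BottleneckBlock` (the same decorator is also removed from `SpineNet` further down). For readers unfamiliar with the decorator, a minimal, self-contained sketch of what it provides follows; the layer below is purely illustrative and not part of this repository:

import tensorflow as tf

# Registering the class under a package name lets Keras look it up by name
# when deserializing a saved model, without passing `custom_objects`.
@tf.keras.utils.register_keras_serializable(package='Vision')
class ScaleLayer(tf.keras.layers.Layer):
  """Illustrative layer: multiplies its input by a constant factor."""

  def __init__(self, factor=2.0, **kwargs):
    super(ScaleLayer, self).__init__(**kwargs)
    self._factor = factor

  def call(self, inputs):
    return inputs * self._factor

  def get_config(self):
    # get_config/from_config is what the registration relies on to rebuild the layer.
    config = super(ScaleLayer, self).get_config()
    config.update({'factor': self._factor})
    return config
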
......@@ -45,11 +45,11 @@ class NormActivation(tf.keras.layers.Layer):
layer.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0. If False, initialize it with 1.
fused: `bool` fused option in batch normalziation.
use_actiation: `bool`, whether to add the optional activation layer after
use_activation: `bool`, whether to add the optional activation layer after
the batch normalization layer.
activation: 'string', the type of the activation layer. Currently support
`relu` and `swish`.
fused: `bool` fused option in batch normalization.
name: `str` name for the operation.
"""
super(NormActivation, self).__init__(trainable=trainable)
......
......@@ -24,7 +24,7 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.vision.detection.modeling.architecture import nn_ops
from official.legacy.detection.modeling.architecture import nn_ops
# TODO(b/140112644): Refactor the code with Keras style, i.e. build and call.
......@@ -41,6 +41,7 @@ class Resnet(object):
Args:
resnet_depth: `int` depth of ResNet backbone model.
activation: the activation function.
norm_activation: an operation that includes a normalization layer followed
by an optional activation layer.
data_format: `str` either "channels_first" for `[batch, channels, height,
......
......@@ -24,9 +24,8 @@ import math
from absl import logging
import tensorflow as tf
from official.legacy.detection.modeling.architecture import nn_blocks
from official.modeling import tf_utils
from official.vision.detection.modeling.architecture import nn_blocks
layers = tf.keras.layers
......@@ -113,7 +112,6 @@ def build_block_specs(block_specs=None):
return [BlockSpec(*b) for b in block_specs]
@tf.keras.utils.register_keras_serializable(package='Vision')
class SpineNet(tf.keras.Model):
"""Class to build SpineNet models."""
......
......@@ -19,13 +19,12 @@ from __future__ import division
from __future__ import print_function
import abc
import functools
import re
import tensorflow as tf
from official.vision.detection.modeling import checkpoint_utils
from official.vision.detection.modeling import learning_rates
from official.vision.detection.modeling import optimizers
from official.legacy.detection.modeling import checkpoint_utils
from official.legacy.detection.modeling import learning_rates
from official.legacy.detection.modeling import optimizers
def _make_filter_trainable_variables_fn(frozen_variable_prefix):
......
......@@ -33,8 +33,9 @@ def _build_assignment_map(keras_model,
prefix='',
skip_variables_regex=None,
var_to_shape_map=None):
"""Compute an assignment mapping for loading older checkpoints into a Keras
"""Builds the variable assignment map.
Compute an assignment mapping for loading older checkpoints into a Keras
model. Variable names are remapped from the original TPUEstimator model to
the new Keras name.
......@@ -53,10 +54,12 @@ def _build_assignment_map(keras_model,
checkpoint_names = []
if var_to_shape_map:
# pylint: disable=g-long-lambda
checkpoint_names = list(
filter(
lambda x: not x.endswith('Momentum') and not x.endswith(
'global_step'), var_to_shape_map.keys()))
# pylint: enable=g-long-lambda
logging.info('Number of variables in the checkpoint %d',
len(checkpoint_names))
......@@ -77,7 +80,9 @@ def _build_assignment_map(keras_model,
continue
# Match name with variables in the checkpoint.
# pylint: disable=cell-var-from-loop
match_names = list(filter(lambda x: x.endswith(var_name), checkpoint_names))
# pylint: enable=cell-var-from-loop
try:
if match_names:
assert len(match_names) == 1, 'more than one match for {}: {}'.format(
......
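
The docstring and filter above describe remapping variable names from an older TPUEstimator checkpoint onto the new Keras model. As a rough sketch of the surrounding idea (not the repository's implementation), the restorable names can be listed with the public `CheckpointReader` API, mirroring the lambda in the hunk above; `checkpoint_path` is a placeholder:

import tensorflow as tf

def list_restorable_names(checkpoint_path):
  """Returns checkpoint variable names, excluding optimizer slots and counters."""
  reader = tf.train.load_checkpoint(checkpoint_path)
  var_to_shape_map = reader.get_variable_to_shape_map()
  # Same filtering rule as the hunk above: skip Momentum slots and global_step.
  return [
      name for name in var_to_shape_map
      if not name.endswith('Momentum') and not name.endswith('global_step')
  ]
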
......@@ -15,10 +15,10 @@
"""Factory to build detection model."""
from official.vision.detection.modeling import maskrcnn_model
from official.vision.detection.modeling import olnmask_model
from official.vision.detection.modeling import retinanet_model
from official.vision.detection.modeling import shapemask_model
from official.legacy.detection.modeling import maskrcnn_model
from official.legacy.detection.modeling import olnmask_model
from official.legacy.detection.modeling import retinanet_model
from official.legacy.detection.modeling import shapemask_model
def model_generator(params):
......
......@@ -18,8 +18,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
from official.modeling.hyperparams import params_dict
......
......@@ -20,17 +20,17 @@ from __future__ import print_function
import tensorflow as tf
from official.vision.detection.dataloader import anchor
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.evaluation import factory as eval_factory
from official.vision.detection.modeling import base_model
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.ops import postprocess_ops
from official.vision.detection.ops import roi_ops
from official.vision.detection.ops import spatial_transform_ops
from official.vision.detection.ops import target_ops
from official.vision.detection.utils import box_utils
from official.legacy.detection.dataloader import anchor
from official.legacy.detection.dataloader import mode_keys
from official.legacy.detection.evaluation import factory as eval_factory
from official.legacy.detection.modeling import base_model
from official.legacy.detection.modeling import losses
from official.legacy.detection.modeling.architecture import factory
from official.legacy.detection.ops import postprocess_ops
from official.legacy.detection.ops import roi_ops
from official.legacy.detection.ops import spatial_transform_ops
from official.legacy.detection.ops import target_ops
from official.legacy.detection.utils import box_utils
class MaskrcnnModel(base_model.Model):
......
......@@ -20,16 +20,16 @@ from __future__ import print_function
import tensorflow as tf
from official.vision.detection.dataloader import anchor
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.modeling.maskrcnn_model import MaskrcnnModel
from official.vision.detection.ops import postprocess_ops
from official.vision.detection.ops import roi_ops
from official.vision.detection.ops import spatial_transform_ops
from official.vision.detection.ops import target_ops
from official.vision.detection.utils import box_utils
from official.legacy.detection.dataloader import anchor
from official.legacy.detection.dataloader import mode_keys
from official.legacy.detection.modeling import losses
from official.legacy.detection.modeling.architecture import factory
from official.legacy.detection.modeling.maskrcnn_model import MaskrcnnModel
from official.legacy.detection.ops import postprocess_ops
from official.legacy.detection.ops import roi_ops
from official.legacy.detection.ops import spatial_transform_ops
from official.legacy.detection.ops import target_ops
from official.legacy.detection.utils import box_utils
class OlnMaskModel(MaskrcnnModel):
......
......@@ -20,7 +20,6 @@ from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
......
......@@ -20,12 +20,12 @@ from __future__ import print_function
import tensorflow as tf
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.evaluation import factory as eval_factory
from official.vision.detection.modeling import base_model
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.ops import postprocess_ops
from official.legacy.detection.dataloader import mode_keys
from official.legacy.detection.evaluation import factory as eval_factory
from official.legacy.detection.modeling import base_model
from official.legacy.detection.modeling import losses
from official.legacy.detection.modeling.architecture import factory
from official.legacy.detection.ops import postprocess_ops
class RetinanetModel(base_model.Model):
......@@ -131,13 +131,13 @@ class RetinanetModel(base_model.Model):
required_output_fields = ['cls_outputs', 'box_outputs']
for field in required_output_fields:
if field not in outputs:
raise ValueError('"%s" is missing in outputs, requried %s found %s',
field, required_output_fields, outputs.keys())
raise ValueError('"%s" is missing in outputs, requried %s found %s' %
(field, required_output_fields, outputs.keys()))
required_label_fields = ['image_info', 'groundtruths']
for field in required_label_fields:
if field not in labels:
raise ValueError('"%s" is missing in outputs, requried %s found %s',
field, required_label_fields, labels.keys())
raise ValueError('"%s" is missing in outputs, requried %s found %s' %
(field, required_label_fields, labels.keys()))
boxes, scores, classes, valid_detections = self._generate_detections_fn(
outputs['box_outputs'], outputs['cls_outputs'], labels['anchor_boxes'],
labels['image_info'][:, 1:2, :])
......
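
The two `ValueError` changes above switch from passing format arguments as extra positional arguments to interpolating with `%` before raising. Unlike `logging` calls, exception constructors never substitute placeholders, so the old form produced an unformatted message. A standalone illustration, not repository code:

# Extra constructor arguments are stored on the exception but never interpolated.
err = ValueError('field "%s" is missing', 'cls_outputs')
print(err.args)  # ('field "%s" is missing', 'cls_outputs') -- placeholder left as-is

# Interpolating first yields a fully rendered message.
err = ValueError('field "%s" is missing' % 'cls_outputs')
print(err.args)  # ('field "cls_outputs" is missing',)
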
......@@ -20,14 +20,14 @@ from __future__ import print_function
import tensorflow as tf
from official.vision.detection.dataloader import anchor
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.evaluation import factory as eval_factory
from official.vision.detection.modeling import base_model
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.ops import postprocess_ops
from official.vision.detection.utils import box_utils
from official.legacy.detection.dataloader import anchor
from official.legacy.detection.dataloader import mode_keys
from official.legacy.detection.evaluation import factory as eval_factory
from official.legacy.detection.modeling import base_model
from official.legacy.detection.modeling import losses
from official.legacy.detection.modeling.architecture import factory
from official.legacy.detection.ops import postprocess_ops
from official.legacy.detection.utils import box_utils
class ShapeMaskModel(base_model.Model):
......