"src/fastertransformer/layers/FfnLayer.cc" did not exist on "720fc533da804ac3f46ee938864403e51fcd9fa7"
Commit 37b9a136 authored by syiming

remove unused code

parent cc2642a9
@@ -102,7 +102,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
self._depth_multiplier = depth_multiplier
self._additional_layer_depth = additional_layer_depth
self._freeze_batchnorm = (not batch_norm_trainable)
self._override_base_feature_extractor_hyperparams =
self._override_base_feature_extractor_hyperparams = \
override_base_feature_extractor_hyperparams
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
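
Editor's note on the hunk above: the old line ends in a bare "=", which Python rejects as a SyntaxError because the statement is incomplete; the replacement adds a backslash so the assignment continues onto the next line. A minimal sketch of the two forms (the parenthesized variant is an equivalent alternative, not part of the commit):

    # Broken: the statement ends at the "=", so this is a SyntaxError.
    # self._override_base_feature_extractor_hyperparams =
    #     override_base_feature_extractor_hyperparams

    # Fixed as committed, with an explicit line continuation:
    self._override_base_feature_extractor_hyperparams = \
        override_base_feature_extractor_hyperparams

    # Equivalent implicit continuation inside parentheses:
    self._override_base_feature_extractor_hyperparams = (
        override_base_feature_extractor_hyperparams)
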
@@ -112,60 +112,6 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
self.classification_backbone = None
self._fpn_features_generator = None
def build(self,):
# TODO: Refine doc string
"""Build Resnet V1 FPN architecture."""
# full_resnet_v1_model = self._resnet_v1_base_model(
# batchnorm_training=self._train_batch_norm,
# conv_hyperparams=(self._conv_hyperparams
# if self._override_base_feature_extractor_hyperparams
# else None),
# min_depth=self._min_depth,
# depth_multiplier=self._depth_multiplier,
# classes=None,
# weights=None,
# include_top=False)
# output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name]
# outputs = [full_resnet_v1_model.get_layer(output_layer_name).output
# for output_layer_name in output_layers]
# self.classification_backbone = tf.keras.Model(
# inputs=full_resnet_v1_model.inputs,
# outputs=outputs)
# self._depth_fn = lambda d: max(
# int(d * self._depth_multiplier), self._min_depth)
# self._base_fpn_max_level = min(self._fpn_max_level, 5)
# self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
# self._fpn_features_generator = (
# feature_map_generators.KerasFpnTopDownFeatureMaps(
# num_levels=self._num_levels,
# depth=self._depth_fn(self._additional_layer_depth),
# is_training=self._is_training,
# conv_hyperparams=self._conv_hyperparams,
# freeze_batchnorm=self._freeze_batchnorm,
# name='FeatureMaps'))
# Construct coarse feature layers
depth = self._depth_fn(self._additional_layer_depth)
for i in range(self._base_fpn_max_level, self._fpn_max_level):
layers = []
layer_name = 'bottom_up_block{}'.format(i)
layers.append(
tf.keras.layers.Conv2D(
depth,
[3, 3],
padding='SAME',
strides=2,
name=layer_name + '_conv',
**self._conv_hyperparams.params()))
layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
self._conv_hyperparams.build_activation_layer(
name=layer_name))
self._coarse_feature_layers.append(layers)
self.built = True
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
@@ -189,28 +135,6 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
else:
return resized_inputs
# def _extract_proposal_features(self, preprocessed_inputs, scope=None):
# # TODO: doc string
# """"""
# preprocessed_inputs = shape_utils.check_min_image_dim(
# 129, preprocessed_inputs)
# with tf.name_scope(scope):
# with tf.name_scope('ResnetV1FPN'):
# image_features = self.classification_backbone(preprocessed_inputs)
# feature_block_list = []
# for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
# feature_block_list.append('block{}'.format(level - 1))
# feature_block_map = dict(
# list(zip(self._resnet_block_names, image_features)))
# fpn_input_image_features = [
# (feature_block, feature_block_map[feature_block])
# for feature_block in feature_block_list]
# fpn_features = self._fpn_features_generator(fpn_input_image_features)
# return fpn_features
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
@@ -262,32 +186,16 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
feature_block_map = dict(
list(zip(self._resnet_block_names, image_features)))
list(zip(self._resnet_block_names, backbone_outputs)))
fpn_input_image_features = [
(feature_block, feature_block_map[feature_block])
for feature_block in feature_block_list]
fpn_features = self._fpn_features_generator(fpn_input_image_features)
feature_extractor_model = tf.keras.models.Model(
inputs=self.full_resnet_v1_model.inputs, outputs=fpn_features)
inputs=full_resnet_v1_model.inputs, outputs=fpn_features)
return feature_extractor_model
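
Editor's note: as a rough usage sketch of the rebuilt proposal-feature model (the instance and tensor names below are hypothetical, not from this file), the returned Keras model maps preprocessed images through the ResNet backbone and the FPN top-down generator:

    # Hypothetical usage; assumes `feature_extractor` is a constructed
    # FasterRCNNResnetV1FPNKerasFeatureExtractor and `preprocessed_inputs`
    # is an image batch that has already gone through preprocess().
    rpn_model = feature_extractor.get_proposal_feature_extractor_model(
        name='FirstStageFeatureExtractor')
    fpn_features = rpn_model(preprocessed_inputs)
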
# def _extract_box_classifier_features(self, proposal_feature_maps, scope=None):
# with tf.name_scope(scope):
# with tf.name_scope('ResnetV1FPN'):
# feature_maps = []
# for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
# feature_maps.append(proposal_feature_maps['top_down_block{}'.format(level-1)])
# self.last_feature_map = proposal_feature_maps['top_down_block{}'.format(
# self._base_fpn_max_level - 1)]
# for coarse_feature_layers in self._coarse_feature_layers:
# for layer in coarse_feature_layers:
# last_feature_map = layer(last_feature_map)
# feature_maps.append(self.last_feature_map)
# return feature_maps
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
@@ -309,8 +217,8 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
with tf.name_scope(name):
with tf.name_scope('ResnetV1FPN'):
feature_extractor_model = tf.keras.models.Sequential([
Dense(unit=1024, activation='ReLU'),
Dense(unit=1024, activation='ReLU')
tf.keras.layers.Dense(unit=1024, activation='ReLU'),
tf.keras.layers.Dense(unit=1024, activation='ReLU')
])
return feature_extractor_model
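
Editor's note, separate from what this commit changes: even the new lines pass unit= and activation='ReLU' to Dense. In the standard tf.keras API the keyword argument is units and the usual activation string is 'relu', so a runnable sketch of the same two-layer head would look like this (an editorial correction, not part of the commit):

    import tensorflow as tf

    # Same structure as the hunk above, with the standard Keras keywords.
    feature_extractor_model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(units=1024, activation='relu'),
        tf.keras.layers.Dense(units=1024, activation='relu'),
    ])
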
@@ -346,10 +254,10 @@ class FasterRCNNResnet50FPNKerasFeatureExtractor(
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__(
super(FasterRCNNResnet50FPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparameters,
conv_hyperparams=conv_hyperparams,
min_depth=min_depth,
depth_multiplier=depth_multiplier,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
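
Editor's note: this hunk (and the matching hunks for the 101 and 152 variants below) fixes the class name passed to super(), which appears to have been copy-pasted from another extractor and did not match the class being defined, plus the conv_hyperparameters typo. In Python 3 the zero-argument form of super() avoids that first failure mode entirely; a minimal sketch under that assumption, with the remaining constructor arguments collapsed into **kwargs and the parent class taken from this file:

    class FasterRCNNResnet50FPNKerasFeatureExtractor(
        FasterRCNNResnetV1FPNKerasFeatureExtractor):

      def __init__(self, is_training, first_stage_features_stride, **kwargs):
        # Zero-argument super() resolves the parent from the enclosing class,
        # so a copy-pasted class name cannot point it at the wrong type.
        super().__init__(
            is_training=is_training,
            first_stage_features_stride=first_stage_features_stride,
            **kwargs)
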
@@ -391,10 +299,10 @@ class FasterRCNNResnet101FPNKerasFeatureExtractor(
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__(
super(FasterRCNNResnet101FPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparameters,
conv_hyperparams=conv_hyperparams,
min_depth=min_depth,
depth_multiplier=depth_multiplier,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
@@ -438,10 +346,10 @@ class FasterRCNNResnet152FPNKerasFeatureExtractor(
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__(
super(FasterRCNNResnet152FPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparameters,
conv_hyperparams=conv_hyperparams,
min_depth=min_depth,
depth_multiplier=depth_multiplier,
resnet_v1_base_model=resnet_v1.resnet_v1_152,