Commit d7f8ea9a authored by A. Unique TensorFlower's avatar A. Unique TensorFlower Committed by TF Object Detection Team
Browse files

Revert addition of explicit `training` kwarg to feature map and box predictor

PiperOrigin-RevId: 362098420
parent cca677c9
...@@ -305,7 +305,7 @@ class KerasMultiResolutionFeatureMaps(tf.keras.Model): ...@@ -305,7 +305,7 @@ class KerasMultiResolutionFeatureMaps(tf.keras.Model):
# this net must be appended only once it's been filled with layers # this net must be appended only once it's been filled with layers
self.convolutions.append(net) self.convolutions.append(net)
def call(self, image_features, training=None): def call(self, image_features):
"""Generate the multi-resolution feature maps. """Generate the multi-resolution feature maps.
Executed when calling the `.__call__` method on input. Executed when calling the `.__call__` method on input.
...@@ -313,11 +313,6 @@ class KerasMultiResolutionFeatureMaps(tf.keras.Model): ...@@ -313,11 +313,6 @@ class KerasMultiResolutionFeatureMaps(tf.keras.Model):
Args: Args:
image_features: A dictionary of handles to activation tensors from the image_features: A dictionary of handles to activation tensors from the
base feature extractor. base feature extractor.
training: A boolean, True when in training mode. If not specified,
defaults to (in order of priority): the training mode of the outer
`Layer.call`; the default mode set by
`tf.keras.backend.set_learning_phase`; or the default value for
`training` in the call signature.
Returns: Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to feature_maps: an OrderedDict mapping keys (feature map names) to
...@@ -333,7 +328,7 @@ class KerasMultiResolutionFeatureMaps(tf.keras.Model): ...@@ -333,7 +328,7 @@ class KerasMultiResolutionFeatureMaps(tf.keras.Model):
else: else:
feature_map = feature_maps[-1] feature_map = feature_maps[-1]
for layer in self.convolutions[index]: for layer in self.convolutions[index]:
feature_map = layer(feature_map, training=training) feature_map = layer(feature_map)
layer_name = self.convolutions[index][-1].name layer_name = self.convolutions[index][-1].name
feature_map_keys.append(layer_name) feature_map_keys.append(layer_name)
feature_maps.append(feature_map) feature_maps.append(feature_map)
...@@ -621,7 +616,7 @@ class KerasFpnTopDownFeatureMaps(tf.keras.Model): ...@@ -621,7 +616,7 @@ class KerasFpnTopDownFeatureMaps(tf.keras.Model):
self.reshape_blocks.append(reshaped_residual) self.reshape_blocks.append(reshaped_residual)
self.conv_layers.append(conv_net) self.conv_layers.append(conv_net)
def call(self, image_features, training=None): def call(self, image_features):
"""Generate the multi-resolution feature maps. """Generate the multi-resolution feature maps.
Executed when calling the `.__call__` method on input. Executed when calling the `.__call__` method on input.
...@@ -630,11 +625,6 @@ class KerasFpnTopDownFeatureMaps(tf.keras.Model): ...@@ -630,11 +625,6 @@ class KerasFpnTopDownFeatureMaps(tf.keras.Model):
image_features: list of tuples of (tensor_name, image_feature_tensor). image_features: list of tuples of (tensor_name, image_feature_tensor).
Spatial resolutions of successive tensors must reduce exactly by a factor Spatial resolutions of successive tensors must reduce exactly by a factor
of 2. of 2.
training: A boolean, True when in training mode. If not specified,
defaults to (in order of priority): the training mode of the outer
`Layer.call`; the default mode set by
`tf.keras.backend.set_learning_phase`; or the default value for
`training` in the call signature.
Returns: Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to feature_maps: an OrderedDict mapping keys (feature map names) to
...@@ -646,7 +636,7 @@ class KerasFpnTopDownFeatureMaps(tf.keras.Model): ...@@ -646,7 +636,7 @@ class KerasFpnTopDownFeatureMaps(tf.keras.Model):
with tf.name_scope(self.scope): with tf.name_scope(self.scope):
top_down = image_features[-1][1] top_down = image_features[-1][1]
for layer in self.top_layers: for layer in self.top_layers:
top_down = layer(top_down, training=training) top_down = layer(top_down)
output_feature_maps_list.append(top_down) output_feature_maps_list.append(top_down)
output_feature_map_keys.append('top_down_%s' % image_features[-1][0]) output_feature_map_keys.append('top_down_%s' % image_features[-1][0])
...@@ -655,14 +645,14 @@ class KerasFpnTopDownFeatureMaps(tf.keras.Model): ...@@ -655,14 +645,14 @@ class KerasFpnTopDownFeatureMaps(tf.keras.Model):
residual = image_features[level][1] residual = image_features[level][1]
top_down = output_feature_maps_list[-1] top_down = output_feature_maps_list[-1]
for layer in self.residual_blocks[index]: for layer in self.residual_blocks[index]:
residual = layer(residual, training=training) residual = layer(residual)
for layer in self.top_down_blocks[index]: for layer in self.top_down_blocks[index]:
top_down = layer(top_down, training=training) top_down = layer(top_down)
for layer in self.reshape_blocks[index]: for layer in self.reshape_blocks[index]:
top_down = layer([residual, top_down], training=training) top_down = layer([residual, top_down])
top_down += residual top_down += residual
for layer in self.conv_layers[index]: for layer in self.conv_layers[index]:
top_down = layer(top_down, training=training) top_down = layer(top_down)
output_feature_maps_list.append(top_down) output_feature_maps_list.append(top_down)
output_feature_map_keys.append('top_down_%s' % image_features[level][0]) output_feature_map_keys.append('top_down_%s' % image_features[level][0])
return collections.OrderedDict(reversed( return collections.OrderedDict(reversed(
......
...@@ -197,7 +197,7 @@ class ConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): ...@@ -197,7 +197,7 @@ class ConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor):
# Apply shared conv layers before the head predictors. # Apply shared conv layers before the head predictors.
for layer in self._shared_nets[index]: for layer in self._shared_nets[index]:
net = layer(net, training=self._is_training) net = layer(net)
for head_name in self._sorted_head_names: for head_name in self._sorted_head_names:
head_obj = self._prediction_heads[head_name][index] head_obj = self._prediction_heads[head_name][index]
...@@ -458,13 +458,13 @@ class WeightSharedConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): ...@@ -458,13 +458,13 @@ class WeightSharedConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor):
def _apply_layers(base_tower_layers, image_feature): def _apply_layers(base_tower_layers, image_feature):
for layer in base_tower_layers: for layer in base_tower_layers:
image_feature = layer(image_feature, training=self._is_training) image_feature = layer(image_feature)
return image_feature return image_feature
for (index, image_feature) in enumerate(image_features): for (index, image_feature) in enumerate(image_features):
# Apply additional projection layers to image features # Apply additional projection layers to image features
for layer in self._additional_projection_layers[index]: for layer in self._additional_projection_layers[index]:
image_feature = layer(image_feature, training=self._is_training) image_feature = layer(image_feature)
# Apply box tower layers. # Apply box tower layers.
box_tower_feature = _apply_layers( box_tower_feature = _apply_layers(
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment