Commit 657dcda5 authored by Kaushik Shivakumar

pull latest

parents 26e24e21 e6017471
@@ -74,6 +74,9 @@ class FakeModel(model.DetectionModel):
   def restore_map(self, checkpoint_path, from_detection_checkpoint):
     pass

+  def restore_from_objects(self, fine_tune_checkpoint_type):
+    pass
+
   def loss(self, prediction_dict, true_image_shapes):
     pass
@@ -416,7 +419,7 @@ class ExportTfliteGraphTest(tf.test.TestCase):
     tflite_graph_file = self._export_graph_with_postprocessing_op(
         pipeline_config)
     self.assertTrue(os.path.exists(tflite_graph_file))
-    mock_get.assert_called_once()
+    self.assertEqual(1, mock_get.call_count)

 if __name__ == '__main__':
...
@@ -76,6 +76,9 @@ class FakeModel(model.DetectionModel):
   def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
     pass

+  def restore_from_objects(self, fine_tune_checkpoint_type):
+    pass
+
   def loss(self, prediction_dict, true_image_shapes):
     pass
...
@@ -105,6 +105,9 @@ class FakeModel(model.DetectionModel):
   def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
     pass

+  def restore_from_objects(self, fine_tune_checkpoint_type):
+    pass
+
   def loss(self, prediction_dict, true_image_shapes):
     pass
...
@@ -30,9 +30,12 @@ pip install apache-beam
 ```
 and can be run locally, or on a cluster for efficient processing of large
-amounts of data. See the
+amounts of data. Note that generate_detection_data.py and
+generate_embedding_data.py both involve running inference, and may be very slow
+to run locally. See the
 [Apache Beam documentation](https://beam.apache.org/documentation/runners/dataflow/)
-for more information.
+for more information, and Google Cloud Documentation for a tutorial on
+[running Beam jobs on DataFlow](https://cloud.google.com/dataflow/docs/quickstarts/quickstart-python).
 ### Generating TfRecords from a set of images and a COCO-CameraTraps style JSON
@@ -191,3 +194,6 @@ python export_inference_graph.py \
     --side_input_types float,int
 ```
+
+If you have questions about Context R-CNN, please contact
+[Sara Beery](https://beerys.github.io/).
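An aside on the Beam note in the first README hunk above (editor's annotation, not part of the commit): whether these pipelines run locally or on Dataflow is controlled by pipeline options rather than by the pipeline code itself. A minimal sketch, with placeholder project/region/bucket values that are not from this repository:

```python
# Sketch only: runner/project/bucket values below are placeholders.
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions

options = PipelineOptions(
    runner='DataflowRunner',   # use 'DirectRunner' to run locally
    project='my-gcp-project',  # hypothetical GCP project id
    region='us-central1',
    temp_location='gs://my-bucket/tmp')

with beam.Pipeline(options=options) as pipeline:
  _ = (pipeline
       | beam.Create(['a', 'b'])
       | beam.Map(lambda x: (x, 1)))
```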
@@ -27,6 +27,7 @@ from object_detection.builders import model_builder
 from object_detection.builders import preprocessor_builder
 from object_detection.core import box_list
 from object_detection.core import box_list_ops
+from object_detection.core import densepose_ops
 from object_detection.core import keypoint_ops
 from object_detection.core import preprocessor
 from object_detection.core import standard_fields as fields
@@ -289,6 +290,13 @@ def transform_input_data(tensor_dict,
             out_tensor_dict[flds_gt_kpt_vis],
             keypoint_type_weight))

+  dp_surface_coords_fld = fields.InputDataFields.groundtruth_dp_surface_coords
+  if dp_surface_coords_fld in tensor_dict:
+    dp_surface_coords = out_tensor_dict[dp_surface_coords_fld]
+    realigned_dp_surface_coords = densepose_ops.change_coordinate_frame(
+        dp_surface_coords, im_box)
+    out_tensor_dict[dp_surface_coords_fld] = realigned_dp_surface_coords
+
   if use_bfloat16:
     preprocessed_resized_image = tf.cast(
         preprocessed_resized_image, tf.bfloat16)
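As context for the hunk above (editor's annotation, not part of the commit): `change_coordinate_frame` re-expresses only the (y, x) image coordinates of each DensePose point relative to the given window; the (v, u) surface coordinates pass through unchanged. A small sketch, assuming the same shapes as the call above (a [num_instances, num_points, 4] tensor and a [4] window):

```python
import tensorflow as tf
from object_detection.core import densepose_ops

# One instance with two sampled points, in (y, x, v, u) format.
dp_surface_coords = tf.constant(
    [[[0.1, 0.2, 0.3, 0.4],
      [0.6, 0.8, 0.6, 0.7]]], dtype=tf.float32)
# Window [y_min, x_min, y_max, x_max] to re-express coordinates in.
window = tf.constant([0.0, 0.0, 1.0, 2.0], dtype=tf.float32)
realigned = densepose_ops.change_coordinate_frame(dp_surface_coords, window)
# y' = (y - y_min) / height, x' = (x - x_min) / width; (v, u) unchanged:
# [[[0.1, 0.1, 0.3, 0.4], [0.6, 0.4, 0.6, 0.7]]]
```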
@@ -355,7 +363,8 @@ def pad_input_data_to_static_shapes(tensor_dict,
                                     num_classes,
                                     spatial_image_shape=None,
                                     max_num_context_features=None,
-                                    context_feature_length=None):
+                                    context_feature_length=None,
+                                    max_dp_points=336):
   """Pads input tensors to static shapes.

   In case num_additional_channels > 0, we assume that the additional channels
@@ -372,6 +381,11 @@ def pad_input_data_to_static_shapes(tensor_dict,
     max_num_context_features (optional): The maximum number of context
       features needed to compute shapes padding.
     context_feature_length (optional): The length of the context feature.
+    max_dp_points (optional): The maximum number of DensePose sampled points
+      per instance. The default (336) is selected since the original DensePose
+      paper (https://arxiv.org/pdf/1802.00434.pdf) indicates that the maximum
+      number of samples per part is 14, and therefore 24 * 14 = 336 is the
+      maximum number of samples per instance.

   Returns:
     A dictionary keyed by fields.InputDataFields containing padding shapes for
@@ -476,6 +490,15 @@ def pad_input_data_to_static_shapes(tensor_dict,
     padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]
     padding_shapes[fields.InputDataFields.
                    groundtruth_keypoint_weights] = padding_shape
+  if fields.InputDataFields.groundtruth_dp_num_points in tensor_dict:
+    padding_shapes[
+        fields.InputDataFields.groundtruth_dp_num_points] = [max_num_boxes]
+    padding_shapes[
+        fields.InputDataFields.groundtruth_dp_part_ids] = [
+            max_num_boxes, max_dp_points]
+    padding_shapes[
+        fields.InputDataFields.groundtruth_dp_surface_coords] = [
+            max_num_boxes, max_dp_points, 4]

   # Prepare for ContextRCNN related fields.
   if fields.InputDataFields.context_features in tensor_dict:
@@ -535,6 +558,10 @@ def augment_input_data(tensor_dict, data_augmentation_options):
                               in tensor_dict)
   include_multiclass_scores = (fields.InputDataFields.multiclass_scores in
                                tensor_dict)
+  dense_pose_fields = [fields.InputDataFields.groundtruth_dp_num_points,
+                       fields.InputDataFields.groundtruth_dp_part_ids,
+                       fields.InputDataFields.groundtruth_dp_surface_coords]
+  include_dense_pose = all(field in tensor_dict for field in dense_pose_fields)
   tensor_dict = preprocessor.preprocess(
       tensor_dict, data_augmentation_options,
       func_arg_map=preprocessor.get_default_func_arg_map(
@@ -543,7 +570,8 @@ def augment_input_data(tensor_dict, data_augmentation_options):
           include_multiclass_scores=include_multiclass_scores,
           include_instance_masks=include_instance_masks,
           include_keypoints=include_keypoints,
-          include_keypoint_visibilities=include_keypoint_visibilities))
+          include_keypoint_visibilities=include_keypoint_visibilities,
+          include_dense_pose=include_dense_pose))
   tensor_dict[fields.InputDataFields.image] = tf.squeeze(
       tensor_dict[fields.InputDataFields.image], axis=0)
   return tensor_dict
@@ -572,6 +600,9 @@ def _get_labels_dict(input_dict):
       fields.InputDataFields.groundtruth_difficult,
       fields.InputDataFields.groundtruth_keypoint_visibilities,
       fields.InputDataFields.groundtruth_keypoint_weights,
+      fields.InputDataFields.groundtruth_dp_num_points,
+      fields.InputDataFields.groundtruth_dp_part_ids,
+      fields.InputDataFields.groundtruth_dp_surface_coords
   ]
   for key in optional_label_keys:
@@ -720,6 +751,17 @@ def train_input(train_config, train_input_config,
       groundtruth visibilities for each keypoint.
     labels[fields.InputDataFields.groundtruth_labeled_classes] is a
       [batch_size, num_classes] float32 k-hot tensor of classes.
+    labels[fields.InputDataFields.groundtruth_dp_num_points] is a
+      [batch_size, num_boxes] int32 tensor with the number of sampled
+      DensePose points per object.
+    labels[fields.InputDataFields.groundtruth_dp_part_ids] is a
+      [batch_size, num_boxes, max_sampled_points] int32 tensor with the
+      DensePose part ids (0-indexed) per object.
+    labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
+      [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the
+      DensePose surface coordinates. The format is (y, x, v, u), where (y, x)
+      are normalized image coordinates and (v, u) are normalized surface part
+      coordinates.

   Raises:
     TypeError: if the `train_config`, `train_input_config` or `model_config`
@@ -861,6 +903,17 @@ def eval_input(eval_config, eval_input_config, model_config,
       same class which heavily occlude each other.
     labels[fields.InputDataFields.groundtruth_labeled_classes] is a
       [num_boxes, num_classes] float32 k-hot tensor of classes.
+    labels[fields.InputDataFields.groundtruth_dp_num_points] is a
+      [batch_size, num_boxes] int32 tensor with the number of sampled
+      DensePose points per object.
+    labels[fields.InputDataFields.groundtruth_dp_part_ids] is a
+      [batch_size, num_boxes, max_sampled_points] int32 tensor with the
+      DensePose part ids (0-indexed) per object.
+    labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
+      [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the
+      DensePose surface coordinates. The format is (y, x, v, u), where (y, x)
+      are normalized image coordinates and (v, u) are normalized surface part
+      coordinates.

   Raises:
     TypeError: if the `eval_config`, `eval_input_config` or `model_config`
...
@@ -1293,6 +1293,51 @@ class DataTransformationFnTest(test_case.TestCase, parameterized.TestCase):
         groundtruth_keypoint_weights,
         [[1.0, 1.0], [1.0, 1.0]])

+  def test_groundtruth_dense_pose(self):
+    def graph_fn():
+      tensor_dict = {
+          fields.InputDataFields.image:
+              tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
+          fields.InputDataFields.groundtruth_boxes:
+              tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
+                                   np.float32)),
+          fields.InputDataFields.groundtruth_classes:
+              tf.constant(np.array([1, 2], np.int32)),
+          fields.InputDataFields.groundtruth_dp_num_points:
+              tf.constant([0, 2], dtype=tf.int32),
+          fields.InputDataFields.groundtruth_dp_part_ids:
+              tf.constant([[0, 0], [4, 23]], dtype=tf.int32),
+          fields.InputDataFields.groundtruth_dp_surface_coords:
+              tf.constant([[[0., 0., 0., 0.], [0., 0., 0., 0.]],
+                           [[0.1, 0.2, 0.3, 0.4], [0.6, 0.8, 0.6, 0.7]]],
+                          dtype=tf.float32),
+      }
+      num_classes = 1
+      input_transformation_fn = functools.partial(
+          inputs.transform_input_data,
+          model_preprocess_fn=_fake_resize50_preprocess_fn,
+          image_resizer_fn=_fake_image_resizer_fn,
+          num_classes=num_classes)
+      transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
+      transformed_dp_num_points = transformed_inputs[
+          fields.InputDataFields.groundtruth_dp_num_points]
+      transformed_dp_part_ids = transformed_inputs[
+          fields.InputDataFields.groundtruth_dp_part_ids]
+      transformed_dp_surface_coords = transformed_inputs[
+          fields.InputDataFields.groundtruth_dp_surface_coords]
+      return (transformed_dp_num_points, transformed_dp_part_ids,
+              transformed_dp_surface_coords)
+
+    dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu(
+        graph_fn, [])
+    self.assertAllEqual(dp_num_points, [0, 2])
+    self.assertAllEqual(dp_part_ids, [[0, 0], [4, 23]])
+    self.assertAllClose(
+        dp_surface_coords,
+        [[[0., 0., 0., 0.], [0., 0., 0., 0.]],
+         [[0.1, 0.1, 0.3, 0.4], [0.6, 0.4, 0.6, 0.7]]])

 class PadInputDataToStaticShapesFnTest(test_case.TestCase):
@@ -1454,6 +1499,35 @@ class PadInputDataToStaticShapesFnTest(test_case.TestCase):
         fields.InputDataFields.groundtruth_keypoint_visibilities]
                      .shape.as_list(), [3, 16])

+  def test_dense_pose(self):
+    input_tensor_dict = {
+        fields.InputDataFields.groundtruth_dp_num_points:
+            tf.constant([0, 2], dtype=tf.int32),
+        fields.InputDataFields.groundtruth_dp_part_ids:
+            tf.constant([[0, 0], [4, 23]], dtype=tf.int32),
+        fields.InputDataFields.groundtruth_dp_surface_coords:
+            tf.constant([[[0., 0., 0., 0.], [0., 0., 0., 0.]],
+                         [[0.1, 0.2, 0.3, 0.4], [0.6, 0.8, 0.6, 0.7]]],
+                        dtype=tf.float32),
+    }
+    padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
+        tensor_dict=input_tensor_dict,
+        max_num_boxes=3,
+        num_classes=1,
+        spatial_image_shape=[128, 128],
+        max_dp_points=200)
+    self.assertAllEqual(
+        padded_tensor_dict[fields.InputDataFields.groundtruth_dp_num_points]
+        .shape.as_list(), [3])
+    self.assertAllEqual(
+        padded_tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids]
+        .shape.as_list(), [3, 200])
+    self.assertAllEqual(
+        padded_tensor_dict[fields.InputDataFields.groundtruth_dp_surface_coords]
+        .shape.as_list(), [3, 200, 4])

   def test_context_features(self):
     context_memory_size = 8
     context_feature_length = 10
...
@@ -185,6 +185,9 @@ class FakeDetectionModel(model.DetectionModel):
     """
     return {var.op.name: var for var in tf.global_variables()}

+  def restore_from_objects(self, fine_tune_checkpoint_type):
+    pass
+
   def updates(self):
     """Returns a list of update operators for this model.
...
@@ -2330,8 +2330,39 @@ class CenterNetMetaArch(model.DetectionModel):
   def regularization_losses(self):
     return []

-  def restore_map(self, fine_tune_checkpoint_type='classification',
+  def restore_map(self,
+                  fine_tune_checkpoint_type='detection',
                   load_all_detection_checkpoint_vars=False):
+    raise RuntimeError('CenterNetMetaArch not supported under TF1.x.')
+
+  def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
+    """Returns a map of Trackable objects to load from a foreign checkpoint.
+
+    Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
+    or Checkpoint). This enables the model to initialize based on weights from
+    another task. For example, the feature extractor variables from a
+    classification model can be used to bootstrap training of an object
+    detector. When loading from an object detection model, the checkpoint model
+    should have the same parameters as this detection model with exception of
+    the num_classes parameter.
+
+    Note that this function is intended to be used to restore Keras-based
+    models when running Tensorflow 2, whereas restore_map (not implemented
+    in CenterNet) is intended to be used to restore Slim-based models when
+    running Tensorflow 1.x.
+
+    TODO(jonathanhuang): Make this function consistent with other
+    meta-architectures.
+
+    Args:
+      fine_tune_checkpoint_type: whether to restore from a full detection
+        checkpoint (with compatible variable names) or to restore from a
+        classification checkpoint for initialization prior to training.
+        Valid values: `detection`, `classification`. Default 'detection'.
+
+    Returns:
+      A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
+    """
     if fine_tune_checkpoint_type == 'classification':
       return {'feature_extractor': self._feature_extractor.get_base_model()}
@@ -2340,7 +2371,7 @@ class CenterNetMetaArch(model.DetectionModel):
       return {'feature_extractor': self._feature_extractor.get_model()}
     else:
-      raise ValueError('Unknown fine tune checkpoint type - {}'.format(
+      raise ValueError('Not supported fine tune checkpoint type - {}'.format(
           fine_tune_checkpoint_type))

   def updates(self):
...
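For orientation (editor's annotation, not part of the commit): the map returned by `restore_from_objects` is meant to be wrapped in a `tf.train.Checkpoint`, which is how `model_lib_v2.load_fine_tune_checkpoint` (added later in this commit) consumes it. A minimal sketch, assuming `model` is an already-built detection model and the checkpoint path is a placeholder:

```python
import tensorflow as tf

# `model` is assumed to be a built DetectionModel; the path is a placeholder.
restore_map = model.restore_from_objects(
    fine_tune_checkpoint_type='classification')
ckpt = tf.train.Checkpoint(**restore_map)
ckpt.restore('/tmp/classification_ckpt/ckpt-0'
            ).assert_existing_objects_matched()
```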
@@ -1574,8 +1574,9 @@ class CenterNetMetaArchRestoreTest(test_case.TestCase):
     """Test restore map for a resnet backbone."""
     model = build_center_net_meta_arch(build_resnet=True)
-    restore_map = model.restore_map('classification')
-    self.assertIsInstance(restore_map['feature_extractor'], tf.keras.Model)
+    restore_from_objects_map = model.restore_from_objects('classification')
+    self.assertIsInstance(restore_from_objects_map['feature_extractor'],
+                          tf.keras.Model)

 class DummyFeatureExtractor(cnma.CenterNetFeatureExtractor):
@@ -1601,9 +1602,6 @@ class DummyFeatureExtractor(cnma.CenterNetFeatureExtractor):
   def postprocess(self):
     pass

-  def restore_map(self):
-    pass
-
   def call(self, inputs):
     batch_size, input_height, input_width, _ = inputs.shape
     fake_output = tf.ones([
...
@@ -250,35 +250,6 @@ class SSDKerasFeatureExtractor(tf.keras.Model):
   def call(self, inputs, **kwargs):
     return self._extract_features(inputs)

-  def restore_from_classification_checkpoint_fn(self, feature_extractor_scope):
-    """Returns a map of variables to load from a foreign checkpoint.
-
-    Args:
-      feature_extractor_scope: A scope name for the feature extractor.
-
-    Returns:
-      A dict mapping variable names (to load from a checkpoint) to variables in
-      the model graph.
-    """
-    variables_to_restore = {}
-    if tf.executing_eagerly():
-      for variable in self.variables:
-        # variable.name includes ":0" at the end, but the names in the
-        # checkpoint do not have the suffix ":0". So, we strip it here.
-        var_name = variable.name[:-2]
-        if var_name.startswith(feature_extractor_scope + '/'):
-          var_name = var_name.replace(feature_extractor_scope + '/', '')
-          variables_to_restore[var_name] = variable
-    else:
-      # b/137854499: use global_variables.
-      for variable in variables_helper.get_global_variables_safely():
-        var_name = variable.op.name
-        if var_name.startswith(feature_extractor_scope + '/'):
-          var_name = var_name.replace(feature_extractor_scope + '/', '')
-          variables_to_restore[var_name] = variable
-    return variables_to_restore

 class SSDMetaArch(model.DetectionModel):
   """SSD Meta-architecture definition."""
...
@@ -508,12 +479,9 @@ class SSDMetaArch(model.DetectionModel):
       ValueError: if inputs tensor does not have type tf.float32
     """
     with tf.name_scope('Preprocessor'):
-      (resized_inputs,
-       true_image_shapes) = shape_utils.resize_images_and_return_shapes(
-           inputs, self._image_resizer_fn)
-      return (self._feature_extractor.preprocess(resized_inputs),
-              true_image_shapes)
+      normalized_inputs = self._feature_extractor.preprocess(inputs)
+      return shape_utils.resize_images_and_return_shapes(
+          normalized_inputs, self._image_resizer_fn)

   def _compute_clip_window(self, preprocessed_images, true_image_shapes):
     """Computes clip window to use during post_processing.
@@ -1295,8 +1263,8 @@ class SSDMetaArch(model.DetectionModel):
         classification checkpoint for initialization prior to training.
         Valid values: `detection`, `classification`. Default 'detection'.
       load_all_detection_checkpoint_vars: whether to load all variables (when
-        `fine_tune_checkpoint_type='detection'`). If False, only variables
-        within the appropriate scopes are included. Default False.
+        `fine_tune_checkpoint_type` is `detection`). If False, only variables
+        within the feature extractor scope are included. Default False.

     Returns:
       A dict mapping variable names (to load from a checkpoint) to variables in
@@ -1311,36 +1279,56 @@ class SSDMetaArch(model.DetectionModel):
     elif fine_tune_checkpoint_type == 'detection':
       variables_to_restore = {}
-      if tf.executing_eagerly():
-        if load_all_detection_checkpoint_vars:
-          # Grab all detection vars by name
-          for variable in self.variables:
-            # variable.name includes ":0" at the end, but the names in the
-            # checkpoint do not have the suffix ":0". So, we strip it here.
-            var_name = variable.name[:-2]
-            variables_to_restore[var_name] = variable
-        else:
-          # Grab just the feature extractor vars by name
-          for variable in self._feature_extractor.variables:
-            # variable.name includes ":0" at the end, but the names in the
-            # checkpoint do not have the suffix ":0". So, we strip it here.
-            var_name = variable.name[:-2]
-            variables_to_restore[var_name] = variable
-      else:
-        for variable in variables_helper.get_global_variables_safely():
-          var_name = variable.op.name
-          if load_all_detection_checkpoint_vars:
-            variables_to_restore[var_name] = variable
-          else:
-            if var_name.startswith(self._extract_features_scope):
-              variables_to_restore[var_name] = variable
+      for variable in variables_helper.get_global_variables_safely():
+        var_name = variable.op.name
+        if load_all_detection_checkpoint_vars:
+          variables_to_restore[var_name] = variable
+        else:
+          if var_name.startswith(self._extract_features_scope):
+            variables_to_restore[var_name] = variable
       return variables_to_restore
     else:
       raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
           fine_tune_checkpoint_type))
+  def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
+    """Returns a map of Trackable objects to load from a foreign checkpoint.
+
+    Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
+    or Checkpoint). This enables the model to initialize based on weights from
+    another task. For example, the feature extractor variables from a
+    classification model can be used to bootstrap training of an object
+    detector. When loading from an object detection model, the checkpoint model
+    should have the same parameters as this detection model with exception of
+    the num_classes parameter.
+
+    Note that this function is intended to be used to restore Keras-based
+    models when running Tensorflow 2, whereas restore_map (above) is intended
+    to be used to restore Slim-based models when running Tensorflow 1.x.
+
+    Args:
+      fine_tune_checkpoint_type: whether to restore from a full detection
+        checkpoint (with compatible variable names) or to restore from a
+        classification checkpoint for initialization prior to training.
+        Valid values: `detection`, `classification`. Default 'detection'.
+
+    Returns:
+      A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
+    """
+    if fine_tune_checkpoint_type == 'classification':
+      return {
+          'feature_extractor':
+              self._feature_extractor.classification_backbone
+      }
+    elif fine_tune_checkpoint_type == 'detection':
+      fake_model = tf.train.Checkpoint(
+          _feature_extractor=self._feature_extractor)
+      return {'model': fake_model}
+    else:
+      raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
+          fine_tune_checkpoint_type))

   def updates(self):
     """Returns a list of update operators for this model.
...
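A plausible reading of the `fake_model` trick above (editor's annotation, not stated in the commit): object-based checkpoint keys mirror attribute paths from the root object, so hanging the feature extractor off a bare `tf.train.Checkpoint` under the name `_feature_extractor` makes its keys line up with those of a full detection checkpoint whose root model carries a `_feature_extractor` attribute. A self-contained sketch (the save path is a placeholder):

```python
import tensorflow as tf

net = tf.Module()
net.v = tf.Variable(1.0)
# Nest the module the same way SSDMetaArch nests its feature extractor.
fake_model = tf.train.Checkpoint(_feature_extractor=net)
root = tf.train.Checkpoint(model=fake_model)
path = root.save('/tmp/demo_ckpt')  # placeholder path
print([name for name, _ in tf.train.list_variables(path)])
# Includes 'model/_feature_extractor/v/.ATTRIBUTES/VARIABLE_VALUE'
```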
@@ -123,6 +123,9 @@ class SimpleModel(model.DetectionModel):
     return []

   def restore_map(self, *args, **kwargs):
+    pass
+
+  def restore_from_objects(self, fine_tune_checkpoint_type):
     return {'model': self}

   def preprocess(self, _):
@@ -174,7 +177,7 @@ class ModelCheckpointTest(tf.test.TestCase):

 class IncompatibleModel(SimpleModel):

-  def restore_map(self, *args, **kwargs):
+  def restore_from_objects(self, *args, **kwargs):
     return {'weight': self.weight}

@@ -207,7 +210,6 @@ class CheckpointV2Test(tf.test.TestCase):
     model_lib_v2.load_fine_tune_checkpoint(
         self._model, self._ckpt_path, checkpoint_type='',
         checkpoint_version=train_pb2.CheckpointVersion.V2,
-        load_all_detection_checkpoint_vars=True,
         input_dataset=self._train_input_fn(),
         unpad_groundtruth_tensors=True)
     np.testing.assert_allclose(self._model.weight.numpy(), 42)
@@ -220,7 +222,6 @@ class CheckpointV2Test(tf.test.TestCase):
     model_lib_v2.load_fine_tune_checkpoint(
         IncompatibleModel(), self._ckpt_path, checkpoint_type='',
         checkpoint_version=train_pb2.CheckpointVersion.V2,
-        load_all_detection_checkpoint_vars=True,
         input_dataset=self._train_input_fn(),
         unpad_groundtruth_tensors=True)
...
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import time
import tensorflow.compat.v1 as tf
from object_detection import eval_util
from object_detection import inputs
from object_detection import model_lib
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import train_pb2
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import visualization_utils as vutils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import tpu as contrib_tpu
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
MODEL_BUILD_UTIL_MAP = model_lib.MODEL_BUILD_UTIL_MAP
RESTORE_MAP_ERROR_TEMPLATE = (
'Since we are restoring a v2 style checkpoint'
' restore_map was expected to return a (str -> Model) mapping,'
' but we received a ({} -> {}) mapping instead.'
)
def _compute_losses_and_predictions_dicts(
model, features, labels,
add_regularization_loss=True):
"""Computes the losses dict and predictions dict for a model on inputs.
Args:
model: a DetectionModel (based on Keras).
features: Dictionary of feature tensors from the input dataset.
Should be in the format output by `inputs.train_input` and
`inputs.eval_input`.
features[fields.InputDataFields.image] is a [batch_size, H, W, C]
float32 tensor with preprocessed images.
features[HASH_KEY] is a [batch_size] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
features[fields.InputDataFields.original_image] (optional) is a
[batch_size, H, W, C] float32 tensor with original images.
labels: A dictionary of groundtruth tensors post-unstacking. The original
labels are of the form returned by `inputs.train_input` and
`inputs.eval_input`. The shapes may have been modified by unstacking with
`model_lib.unstack_batch`. However, the dictionary includes the following
fields.
labels[fields.InputDataFields.num_groundtruth_boxes] is a
int32 tensor indicating the number of valid groundtruth boxes
per image.
labels[fields.InputDataFields.groundtruth_boxes] is a float32 tensor
containing the corners of the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a float32
one-hot tensor of classes.
labels[fields.InputDataFields.groundtruth_weights] is a float32 tensor
containing groundtruth weights for the boxes.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
float32 tensor containing only binary values, which represent
instance masks for objects.
labels[fields.InputDataFields.groundtruth_keypoints] is a
float32 tensor containing keypoints for each box.
labels[fields.InputDataFields.groundtruth_group_of] is a tf.bool tensor
containing group_of annotations.
labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32
k-hot tensor of classes.
add_regularization_loss: Whether or not to include the model's
regularization loss in the losses dictionary.
Returns:
A tuple containing the losses dictionary (with the total loss under
the key 'Loss/total_loss'), and the predictions dictionary produced by
`model.predict`.
"""
model_lib.provide_groundtruth(model, labels)
preprocessed_images = features[fields.InputDataFields.image]
prediction_dict = model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape])
prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)
losses_dict = model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
if add_regularization_loss:
# TODO(kaftan): As we figure out mixed precision & bfloat 16, we may
## need to convert these regularization losses from bfloat16 to float32
## as well.
regularization_losses = model.regularization_losses()
if regularization_losses:
regularization_losses = ops.bfloat16_to_float32_nested(
regularization_losses)
regularization_loss = tf.add_n(
regularization_losses, name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
return losses_dict, prediction_dict
# TODO(kaftan): Explore removing learning_rate from this method & returning
## The full losses dict instead of just total_loss, then doing all summaries
## saving in a utility method called by the outer training loop.
# TODO(kaftan): Explore adding gradient summaries
def eager_train_step(detection_model,
features,
labels,
unpad_groundtruth_tensors,
optimizer,
learning_rate,
add_regularization_loss=True,
clip_gradients_value=None,
global_step=None,
num_replicas=1.0):
"""Process a single training batch.
This method computes the loss for the model on a single training batch,
while tracking the gradients with a gradient tape. It then updates the
model variables with the optimizer, clipping the gradients if
clip_gradients_value is present.
This method can run eagerly or inside a tf.function.
Args:
detection_model: A DetectionModel (based on Keras) to train.
features: Dictionary of feature tensors from the input dataset.
Should be in the format output by `inputs.train_input`.
features[fields.InputDataFields.image] is a [batch_size, H, W, C]
float32 tensor with preprocessed images.
features[HASH_KEY] is a [batch_size] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
features[fields.InputDataFields.original_image] (optional, not used
during training) is a
[batch_size, H, W, C] float32 tensor with original images.
labels: A dictionary of groundtruth tensors. This method unstacks
these labels using model_lib.unstack_batch. The stacked labels are of
the form returned by `inputs.train_input` and `inputs.eval_input`.
labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size]
int32 tensor indicating the number of valid groundtruth boxes
per image.
labels[fields.InputDataFields.groundtruth_boxes] is a
[batch_size, num_boxes, 4] float32 tensor containing the corners of
the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a
[batch_size, num_boxes, num_classes] float32 one-hot tensor of
classes. num_classes includes the background class.
labels[fields.InputDataFields.groundtruth_weights] is a
[batch_size, num_boxes] float32 tensor containing groundtruth weights
for the boxes.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
[batch_size, num_boxes, H, W] float32 tensor containing only binary
values, which represent instance masks for objects.
labels[fields.InputDataFields.groundtruth_keypoints] is a
[batch_size, num_boxes, num_keypoints, 2] float32 tensor containing
keypoints for each box.
labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32
k-hot tensor of classes.
unpad_groundtruth_tensors: A parameter passed to unstack_batch.
optimizer: The training optimizer that will update the variables.
learning_rate: The learning rate tensor for the current training step.
This is used only for TensorBoard logging purposes, it does not affect
model training.
add_regularization_loss: Whether or not to include the model's
regularization loss in the losses dictionary.
clip_gradients_value: If this is present, clip the gradients global norm
at this value using `tf.clip_by_global_norm`.
global_step: The current training step. Used for TensorBoard logging
purposes. This step is not updated by this function and must be
incremented separately.
num_replicas: The number of replicas in the current distribution strategy.
This is used to scale the total loss so that training in a distribution
strategy works correctly.
Returns:
The total loss observed at this training step
"""
# """Execute a single training step in the TF v2 style loop."""
is_training = True
detection_model._is_training = is_training # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(is_training)
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
with tf.GradientTape() as tape:
losses_dict, _ = _compute_losses_and_predictions_dicts(
detection_model, features, labels, add_regularization_loss)
total_loss = losses_dict['Loss/total_loss']
# Normalize loss for num replicas
total_loss = tf.math.divide(total_loss,
tf.constant(num_replicas, dtype=tf.float32))
losses_dict['Loss/normalized_total_loss'] = total_loss
for loss_type in losses_dict:
tf.compat.v2.summary.scalar(
loss_type, losses_dict[loss_type], step=global_step)
trainable_variables = detection_model.trainable_variables
gradients = tape.gradient(total_loss, trainable_variables)
if clip_gradients_value:
gradients, _ = tf.clip_by_global_norm(gradients, clip_gradients_value)
optimizer.apply_gradients(zip(gradients, trainable_variables))
tf.compat.v2.summary.scalar('learning_rate', learning_rate, step=global_step)
tf.compat.v2.summary.image(
name='train_input_images',
step=global_step,
data=features[fields.InputDataFields.image],
max_outputs=3)
return total_loss
def validate_tf_v2_checkpoint_restore_map(checkpoint_restore_map):
"""Ensure that given dict is a valid TF v2 style restore map.
Args:
checkpoint_restore_map: A dict mapping strings to Trackable objects
(tf.Module or tf.train.Checkpoint).
Raises:
TypeError: If the keys in checkpoint_restore_map are not strings or if
the values are not tf.Module or tf.train.Checkpoint objects.
"""
for key, value in checkpoint_restore_map.items():
if not (isinstance(key, str) and
(isinstance(value, tf.Module)
or isinstance(value, tf.train.Checkpoint))):
raise TypeError(RESTORE_MAP_ERROR_TEMPLATE.format(
key.__class__.__name__, value.__class__.__name__))
def is_object_based_checkpoint(checkpoint_path):
"""Returns true if `checkpoint_path` points to an object-based checkpoint."""
var_names = [var[0] for var in tf.train.list_variables(checkpoint_path)]
return '_CHECKPOINTABLE_OBJECT_GRAPH' in var_names
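To make the marker check above concrete (editor's sketch; the path is a placeholder): V2 object-based checkpoints serialize an object-graph proto under a reserved key that name-based V1 checkpoints do not have.

```python
import tensorflow as tf

v = tf.Variable(1.0)
path = tf.train.Checkpoint(v=v).save('/tmp/obj_ckpt')  # placeholder path
names = [name for name, _ in tf.train.list_variables(path)]
print('_CHECKPOINTABLE_OBJECT_GRAPH' in names)  # True for object-based ckpts
```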
def load_fine_tune_checkpoint(
model, checkpoint_path, checkpoint_type, checkpoint_version, input_dataset,
unpad_groundtruth_tensors):
"""Load a fine tuning classification or detection checkpoint.
To make sure the model variables are all built, this method first executes
the model by computing a dummy loss. (Models might not have built their
variables before their first execution)
It then loads an object-based classification or detection checkpoint.
This method updates the model in-place and does not return a value.
Args:
model: A DetectionModel (based on Keras) to load a fine-tuning
checkpoint for.
checkpoint_path: Directory with checkpoints file or path to checkpoint.
checkpoint_type: Whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`.
checkpoint_version: train_pb2.CheckpointVersion.V1 or V2 enum indicating
whether to load checkpoints in V1 style or V2 style. In this binary
we only support V2 style (object-based) checkpoints.
input_dataset: The tf.data Dataset the model is being trained on. Needed
to get the shapes for the dummy loss computation.
unpad_groundtruth_tensors: A parameter passed to unstack_batch.
Raises:
IOError: if `checkpoint_path` does not point at a valid object-based
checkpoint
ValueError: if `checkpoint_version` is not train_pb2.CheckpointVersion.V2
"""
if not is_object_based_checkpoint(checkpoint_path):
raise IOError('Checkpoint is expected to be an object-based checkpoint.')
if checkpoint_version == train_pb2.CheckpointVersion.V1:
raise ValueError('Checkpoint version should be V2')
features, labels = iter(input_dataset).next()
@tf.function
def _dummy_computation_fn(features, labels):
model._is_training = False # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(False)
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
return _compute_losses_and_predictions_dicts(
model,
features,
labels)
strategy = tf.compat.v2.distribute.get_strategy()
strategy.experimental_run_v2(
_dummy_computation_fn, args=(
features,
labels,
))
restore_from_objects_dict = model.restore_from_objects(
fine_tune_checkpoint_type=checkpoint_type)
validate_tf_v2_checkpoint_restore_map(restore_from_objects_dict)
ckpt = tf.train.Checkpoint(**restore_from_objects_dict)
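# Note: assert_existing_objects_matched is looser than assert_consumed; it
# tolerates checkpoint values that match nothing, but fails if any object in
# the restore map found no counterpart in the checkpoint.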
ckpt.restore(checkpoint_path).assert_existing_objects_matched()
def get_filepath(strategy, filepath):
"""Get appropriate filepath for worker.
Args:
strategy: A tf.distribute.Strategy object.
filepath: A path to where the Checkpoint object is stored.
Returns:
A temporary filepath for non-chief workers to use or the original filepath
for the chief.
"""
if strategy.extended.should_checkpoint:
return filepath
else:
# TODO(vighneshb) Replace with the public API when TF exposes it.
task_id = strategy.extended._task_id # pylint:disable=protected-access
return os.path.join(filepath, 'temp_worker_{:03d}'.format(task_id))
def clean_temporary_directories(strategy, filepath):
"""Temporary directory clean up for MultiWorker Mirrored Strategy.
This is needed for all non-chief workers.
Args:
strategy: A tf.distribute.Strategy object.
filepath: The filepath for the temporary directory.
"""
if not strategy.extended.should_checkpoint:
if tf.io.gfile.exists(filepath) and tf.io.gfile.isdir(filepath):
tf.io.gfile.rmtree(filepath)
def train_loop(
pipeline_config_path,
model_dir,
config_override=None,
train_steps=None,
use_tpu=False,
save_final_config=False,
checkpoint_every_n=1000,
checkpoint_max_to_keep=7,
**kwargs):
"""Trains a model using eager + functions.
This method:
1. Processes the pipeline configs
2. (Optionally) saves the as-run config
3. Builds the model & optimizer
4. Gets the training input data
5. Loads a fine-tuning detection or classification checkpoint if requested
6. Loops over the train data, executing distributed training steps inside
tf.functions.
7. Checkpoints the model every `checkpoint_every_n` training steps.
8. Logs the training metrics as TensorBoard summaries.
Args:
pipeline_config_path: A path to a pipeline config file.
model_dir:
The directory to save checkpoints and summaries to.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
use_tpu: Boolean, whether training and evaluation should run on TPU.
save_final_config: Whether to save final config (obtained after applying
overrides) to `model_dir`.
checkpoint_every_n:
Checkpoint every n training steps.
checkpoint_max_to_keep:
int, the number of most recent checkpoints to keep in the model directory.
**kwargs: Additional keyword arguments for configuration override.
"""
## Parse the configs
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'train_steps': train_steps,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
configs = merge_external_params_with_configs(
configs, None, kwargs_dict=kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
unpad_groundtruth_tensors = train_config.unpad_groundtruth_tensors
add_regularization_loss = train_config.add_regularization_loss
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
# update train_steps from config but only when non-zero value is provided
if train_steps is None and train_config.num_steps != 0:
train_steps = train_config.num_steps
if kwargs['use_bfloat16']:
tf.compat.v2.keras.mixed_precision.experimental.set_policy('mixed_bfloat16')
if train_config.load_all_detection_checkpoint_vars:
raise ValueError('train_pb2.load_all_detection_checkpoint_vars '
'unsupported in TF2')
config_util.update_fine_tune_checkpoint_type(train_config)
fine_tune_checkpoint_type = train_config.fine_tune_checkpoint_type
fine_tune_checkpoint_version = train_config.fine_tune_checkpoint_version
# Write the as-run pipeline config to disk.
if save_final_config:
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, model_dir)
# Build the model, optimizer, and training input
strategy = tf.compat.v2.distribute.get_strategy()
with strategy.scope():
detection_model = model_builder.build(
model_config=model_config, is_training=True)
def train_dataset_fn(input_context):
"""Callable to create train input."""
# Create the inputs.
train_input = inputs.train_input(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config,
model=detection_model,
input_context=input_context)
train_input = train_input.repeat()
return train_input
train_input = strategy.experimental_distribute_datasets_from_function(
train_dataset_fn)
global_step = tf.Variable(
0, trainable=False, dtype=tf.compat.v2.dtypes.int64, name='global_step',
aggregation=tf.compat.v2.VariableAggregation.ONLY_FIRST_REPLICA)
optimizer, (learning_rate,) = optimizer_builder.build(
train_config.optimizer, global_step=global_step)
if callable(learning_rate):
learning_rate_fn = learning_rate
else:
learning_rate_fn = lambda: learning_rate
## Train the model
# Get the appropriate filepath (temporary or not) based on whether the worker
# is the chief.
summary_writer_filepath = get_filepath(strategy,
os.path.join(model_dir, 'train'))
summary_writer = tf.compat.v2.summary.create_file_writer(
summary_writer_filepath)
if use_tpu:
num_steps_per_iteration = 100
else:
# TODO(b/135933080) Explore setting to 100 when GPU performance issues
# are fixed.
num_steps_per_iteration = 1
with summary_writer.as_default():
with strategy.scope():
with tf.compat.v2.summary.record_if(
lambda: global_step % num_steps_per_iteration == 0):
# Load a fine-tuning checkpoint.
if train_config.fine_tune_checkpoint:
load_fine_tune_checkpoint(detection_model,
train_config.fine_tune_checkpoint,
fine_tune_checkpoint_type,
fine_tune_checkpoint_version,
train_input,
unpad_groundtruth_tensors)
ckpt = tf.compat.v2.train.Checkpoint(
step=global_step, model=detection_model, optimizer=optimizer)
manager_dir = get_filepath(strategy, model_dir)
if not strategy.extended.should_checkpoint:
checkpoint_max_to_keep = 1
manager = tf.compat.v2.train.CheckpointManager(
ckpt, manager_dir, max_to_keep=checkpoint_max_to_keep)
# We use the following instead of manager.latest_checkpoint because
# manager_dir does not point to the model directory when we are running
# in a worker.
latest_checkpoint = tf.train.latest_checkpoint(model_dir)
ckpt.restore(latest_checkpoint)
def train_step_fn(features, labels):
"""Single train step."""
loss = eager_train_step(
detection_model,
features,
labels,
unpad_groundtruth_tensors,
optimizer,
learning_rate=learning_rate_fn(),
add_regularization_loss=add_regularization_loss,
clip_gradients_value=clip_gradients_value,
global_step=global_step,
num_replicas=strategy.num_replicas_in_sync)
global_step.assign_add(1)
return loss
def _sample_and_train(strategy, train_step_fn, data_iterator):
features, labels = data_iterator.next()
per_replica_losses = strategy.experimental_run_v2(
train_step_fn, args=(features, labels))
# TODO(anjalisridhar): explore if it is safe to remove the
## num_replicas scaling of the loss and switch this to a ReduceOp.Mean
return strategy.reduce(tf.distribute.ReduceOp.SUM,
per_replica_losses, axis=None)
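# Note: wrapping several steps in one tf.function trace (below) amortizes the
# per-step host launch overhead, which is why num_steps_per_iteration is set
# to 100 on TPU and 1 elsewhere.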
@tf.function
def _dist_train_step(data_iterator):
"""A distributed train step."""
if num_steps_per_iteration > 1:
for _ in tf.range(num_steps_per_iteration - 1):
_sample_and_train(strategy, train_step_fn, data_iterator)
return _sample_and_train(strategy, train_step_fn, data_iterator)
train_input_iter = iter(train_input)
if int(global_step.value()) == 0:
manager.save()
checkpointed_step = int(global_step.value())
logged_step = global_step.value()
last_step_time = time.time()
for _ in range(global_step.value(), train_steps,
num_steps_per_iteration):
loss = _dist_train_step(train_input_iter)
time_taken = time.time() - last_step_time
last_step_time = time.time()
tf.compat.v2.summary.scalar(
'steps_per_sec', num_steps_per_iteration * 1.0 / time_taken,
step=global_step)
if global_step.value() - logged_step >= 100:
tf.logging.info(
'Step {} per-step time {:.3f}s loss={:.3f}'.format(
global_step.value(), time_taken / num_steps_per_iteration,
loss))
logged_step = global_step.value()
if ((int(global_step.value()) - checkpointed_step) >=
checkpoint_every_n):
manager.save()
checkpointed_step = int(global_step.value())
# Remove the checkpoint directories of the non-chief workers that
# MultiWorkerMirroredStrategy forces us to save during sync distributed
# training.
clean_temporary_directories(strategy, manager_dir)
clean_temporary_directories(strategy, summary_writer_filepath)
def eager_eval_loop(
detection_model,
configs,
eval_dataset,
use_tpu=False,
postprocess_on_cpu=False,
global_step=None):
"""Evaluate the model eagerly on the evaluation dataset.
This method will compute the evaluation metrics specified in the configs on
the entire evaluation dataset, then return the metrics. It will also log
the metrics to TensorBoard.
Args:
detection_model: A DetectionModel (based on Keras) to evaluate.
configs: Object detection configs that specify the evaluators that should
be used, as well as whether regularization loss should be included and
if bfloat16 should be used on TPUs.
eval_dataset: Dataset containing evaluation data.
use_tpu: Whether a TPU is being used to execute the model for evaluation.
postprocess_on_cpu: Whether model postprocessing should happen on
the CPU when using a TPU to execute the model.
global_step: A variable containing the training step this model was trained
to. Used for logging purposes.
Returns:
A dict of evaluation metrics representing the results of this evaluation.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
add_regularization_loss = train_config.add_regularization_loss
is_training = False
detection_model._is_training = is_training # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(is_training)
evaluator_options = eval_util.evaluator_options_from_eval_config(
eval_config)
class_agnostic_category_index = (
label_map_util.create_class_agnostic_category_index())
class_agnostic_evaluators = eval_util.get_evaluators(
eval_config,
list(class_agnostic_category_index.values()),
evaluator_options)
class_aware_evaluators = None
if eval_input_config.label_map_path:
class_aware_category_index = (
label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path))
class_aware_evaluators = eval_util.get_evaluators(
eval_config,
list(class_aware_category_index.values()),
evaluator_options)
evaluators = None
loss_metrics = {}
@tf.function
def compute_eval_dict(features, labels):
"""Compute the evaluation result on an image."""
# When evaluating on training data, it is necessary to check whether the
# groundtruth must be unpadded.
boxes_shape = (
labels[fields.InputDataFields.groundtruth_boxes].get_shape().as_list())
unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
losses_dict, prediction_dict = _compute_losses_and_predictions_dicts(
detection_model, features, labels, add_regularization_loss)
def postprocess_wrapper(args):
return detection_model.postprocess(args[0], args[1])
# TODO(kaftan): Depending on how postprocessing will work for TPUS w/
## TPUStrategy, may be good to move wrapping to a utility method
if use_tpu and postprocess_on_cpu:
detections = contrib_tpu.outside_compilation(
postprocess_wrapper,
(prediction_dict, features[fields.InputDataFields.true_image_shape]))
else:
detections = postprocess_wrapper(
(prediction_dict, features[fields.InputDataFields.true_image_shape]))
class_agnostic = (
fields.DetectionResultFields.detection_classes not in detections)
# TODO(kaftan) (or anyone): move `_prepare_groundtruth_for_eval` to eval_util
## and call this from there.
groundtruth = model_lib._prepare_groundtruth_for_eval( # pylint: disable=protected-access
detection_model, class_agnostic, eval_input_config.max_number_of_boxes)
use_original_images = fields.InputDataFields.original_image in features
if use_original_images:
eval_images = features[fields.InputDataFields.original_image]
true_image_shapes = tf.slice(
features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])
original_image_spatial_shapes = features[
fields.InputDataFields.original_image_spatial_shape]
else:
eval_images = features[fields.InputDataFields.image]
true_image_shapes = None
original_image_spatial_shapes = None
eval_dict = eval_util.result_dict_for_batched_example(
eval_images,
features[inputs.HASH_KEY],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=True,
original_image_spatial_shapes=original_image_spatial_shapes,
true_image_shapes=true_image_shapes)
return eval_dict, losses_dict, class_agnostic
agnostic_categories = label_map_util.create_class_agnostic_category_index()
per_class_categories = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
keypoint_edges = [
(kp.start, kp.end) for kp in eval_config.keypoint_edge]
for i, (features, labels) in enumerate(eval_dataset):
eval_dict, losses_dict, class_agnostic = compute_eval_dict(features, labels)
if class_agnostic:
category_index = agnostic_categories
else:
category_index = per_class_categories
if i % 100 == 0:
tf.logging.info('Finished eval step %d', i)
use_original_images = fields.InputDataFields.original_image in features
if use_original_images and i < eval_config.num_visualizations:
sbys_image_list = vutils.draw_side_by_side_evaluation_image(
eval_dict,
category_index=category_index,
max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,
min_score_thresh=eval_config.min_score_threshold,
use_normalized_coordinates=False,
keypoint_edges=keypoint_edges or None)
sbys_images = tf.concat(sbys_image_list, axis=0)
tf.compat.v2.summary.image(
name='eval_side_by_side_' + str(i),
step=global_step,
data=sbys_images,
max_outputs=1)
if evaluators is None:
if class_agnostic:
evaluators = class_agnostic_evaluators
else:
evaluators = class_aware_evaluators
for evaluator in evaluators:
evaluator.add_eval_dict(eval_dict)
for loss_key, loss_tensor in iter(losses_dict.items()):
if loss_key not in loss_metrics:
loss_metrics[loss_key] = tf.keras.metrics.Mean()
# Skip losses with values less than or equal to 0.0 when calculating the
# average loss, since they do not reflect normal loss behavior and would
# make the average loss spurious.
if loss_tensor <= 0.0:
continue
loss_metrics[loss_key].update_state(loss_tensor)
eval_metrics = {}
for evaluator in evaluators:
eval_metrics.update(evaluator.evaluate())
for loss_key in loss_metrics:
eval_metrics[loss_key] = loss_metrics[loss_key].result()
eval_metrics = {str(k): v for k, v in eval_metrics.items()}
tf.logging.info('Eval metrics at step %d', global_step)
for k in eval_metrics:
tf.compat.v2.summary.scalar(k, eval_metrics[k], step=global_step)
tf.logging.info('\t+ %s: %f', k, eval_metrics[k])
return eval_metrics
def eval_continuously(
pipeline_config_path,
config_override=None,
train_steps=None,
sample_1_of_n_eval_examples=1,
sample_1_of_n_eval_on_train_examples=1,
use_tpu=False,
override_eval_num_epochs=True,
postprocess_on_cpu=False,
model_dir=None,
checkpoint_dir=None,
wait_interval=180,
timeout=3600,
**kwargs):
"""Run continuous evaluation of a detection model eagerly.
This method builds the model, continuously restores it from the most
recent training checkpoint in the checkpoint directory, and evaluates it
on the evaluation data.
Args:
pipeline_config_path: A path to a pipeline config file.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
sample_1_of_n_eval_examples: Integer representing how often an eval example
should be sampled. If 1, will sample all examples.
sample_1_of_n_eval_on_train_examples: Similar to
`sample_1_of_n_eval_examples`, except controls the sampling of training
data for evaluation.
use_tpu: Boolean, whether training and evaluation should run on TPU.
override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for
eval_input.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true,
postprocess is scheduled on the host cpu.
model_dir: Directory to output resulting evaluation summaries to.
checkpoint_dir: Directory that contains the training checkpoints.
wait_interval: The minimum number of seconds to wait before checking for a
new checkpoint.
timeout: The maximum number of seconds to wait for a checkpoint. Execution
will terminate if no new checkpoints are found after that many seconds.
**kwargs: Additional keyword arguments for configuration override.
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
if train_steps is not None:
kwargs['train_steps'] = train_steps
if override_eval_num_epochs:
kwargs.update({'eval_num_epochs': 1})
tf.logging.warning(
'Forced number of epochs for all eval validations to be 1.')
configs = merge_external_params_with_configs(
configs, None, kwargs_dict=kwargs)
model_config = configs['model']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_configs = configs['eval_input_configs']
eval_on_train_input_config = copy.deepcopy(train_input_config)
eval_on_train_input_config.sample_1_of_n_examples = (
sample_1_of_n_eval_on_train_examples)
if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:
tf.logging.warning('Expected number of evaluation epochs is 1, but '
'instead encountered `eval_on_train_input_config'
'.num_epochs` = '
'{}. Overwriting `num_epochs` to 1.'.format(
eval_on_train_input_config.num_epochs))
eval_on_train_input_config.num_epochs = 1
if kwargs['use_bfloat16']:
tf.compat.v2.keras.mixed_precision.experimental.set_policy('mixed_bfloat16')
detection_model = model_builder.build(
model_config=model_config, is_training=True)
# Create the inputs.
eval_inputs = []
for eval_input_config in eval_input_configs:
next_eval_input = inputs.eval_input(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config,
model=detection_model)
eval_inputs.append((eval_input_config.name, next_eval_input))
global_step = tf.compat.v2.Variable(
0, trainable=False, dtype=tf.compat.v2.dtypes.int64)
for latest_checkpoint in tf.train.checkpoints_iterator(
checkpoint_dir, timeout=timeout, min_interval_secs=wait_interval):
ckpt = tf.compat.v2.train.Checkpoint(
step=global_step, model=detection_model)
ckpt.restore(latest_checkpoint).expect_partial()
for eval_name, eval_input in eval_inputs:
summary_writer = tf.compat.v2.summary.create_file_writer(
os.path.join(model_dir, 'eval', eval_name))
with summary_writer.as_default():
eager_eval_loop(
detection_model,
configs,
eval_input,
use_tpu=use_tpu,
postprocess_on_cpu=postprocess_on_cpu,
global_step=global_step)
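# A minimal usage sketch for eval_continuously (not part of the original
# file); the config and directory paths below are hypothetical placeholders.
#
# eval_continuously(
#     pipeline_config_path='path/to/pipeline.config',
#     model_dir='/tmp/model_outputs',
#     checkpoint_dir='/tmp/train_outputs',
#     wait_interval=300,
#     timeout=3600)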
...@@ -16,14 +16,6 @@
r"""Creates and runs TF2 object detection models.
##################################
NOTE: This module has not been fully tested; please bear with us while we iron
out the kinks.
##################################
When a TPU device is available, this binary uses TPUStrategy. Otherwise, it uses
GPUS with MirroredStrategy/MultiWorkerMirroredStrategy.
For local training/evaluation run:
PIPELINE_CONFIG_PATH=path/to/pipeline.config
MODEL_DIR=/tmp/model_outputs
...@@ -60,6 +52,8 @@ flags.DEFINE_string(
flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an'
'evaluation checkpoint before exiting.')
flags.DEFINE_bool('use_tpu', False, 'Whether the job is executing on a TPU.')
flags.DEFINE_integer(
'num_workers', 1, 'When num_workers > 1, training uses '
'MultiWorkerMirroredStrategy. When num_workers = 1 it uses '
...@@ -84,7 +78,7 @@ def main(unused_argv):
checkpoint_dir=FLAGS.checkpoint_dir,
wait_interval=300, timeout=FLAGS.eval_timeout)
else:
if tf.config.get_visible_devices('TPU'): if FLAGS.use_tpu:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
...
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate bidirectional feature pyramids based on image features.
Provides bidirectional feature pyramid network (BiFPN) generators that can be
used to build object detection feature extractors, as proposed by Tan et al.
See https://arxiv.org/abs/1911.09070 for more details.
"""
import collections
import functools
from six.moves import range
from six.moves import zip
import tensorflow as tf
from object_detection.utils import bifpn_utils
def _create_bifpn_input_config(fpn_min_level,
fpn_max_level,
input_max_level,
level_scales=None):
"""Creates a BiFPN input config for the input levels from a backbone network.
Args:
fpn_min_level: the minimum pyramid level (highest feature map resolution) to
use in the BiFPN.
fpn_max_level: the maximum pyramid level (lowest feature map resolution) to
use in the BiFPN.
input_max_level: the maximum pyramid level that will be provided as input to
the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels
from input_max_level, up to the desired fpn_max_level.
level_scales: a list of pyramid level scale factors. If 'None', each level's
scale is set to 2^level by default, which corresponds to each successive
feature map scaling by a factor of 2.
Returns:
A list of dictionaries for each feature map expected as input to the BiFPN,
where each has entries for the feature map 'name' and 'scale'.
"""
if not level_scales:
level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)]
bifpn_input_params = []
for i in range(fpn_min_level, min(fpn_max_level, input_max_level) + 1):
bifpn_input_params.append({
'name': '0_up_lvl_{}'.format(i),
'scale': level_scales[i - fpn_min_level]
})
return bifpn_input_params
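# For example, with the default level scales, fpn_min_level=3,
# fpn_max_level=7, and input_max_level=5 yield:
# [{'name': '0_up_lvl_3', 'scale': 8},
#  {'name': '0_up_lvl_4', 'scale': 16},
#  {'name': '0_up_lvl_5', 'scale': 32}]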
def _get_bifpn_output_node_names(fpn_min_level, fpn_max_level, node_config):
"""Returns a list of BiFPN output node names, given a BiFPN node config.
Args:
fpn_min_level: the minimum pyramid level (highest feature map resolution)
used by the BiFPN.
fpn_max_level: the maximum pyramid level (lowest feature map resolution)
used by the BiFPN.
node_config: the BiFPN node_config, a list of dictionaries corresponding to
each node in the BiFPN computation graph, where each entry should have an
associated 'name'.
Returns:
A list of strings corresponding to the names of the output BiFPN nodes.
"""
num_output_nodes = fpn_max_level - fpn_min_level + 1
return [node['name'] for node in node_config[-num_output_nodes:]]
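# For example, with fpn_min_level=3 and fpn_max_level=7, this returns the
# names of the last five nodes in node_config, e.g. ['2_dn_lvl_3',
# '2_up_lvl_4', '2_up_lvl_5', '2_up_lvl_6', '2_up_lvl_7'] after two BiFPN
# iterations.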
def _create_bifpn_node_config(bifpn_num_iterations,
bifpn_num_filters,
fpn_min_level,
fpn_max_level,
input_max_level,
bifpn_node_params=None,
level_scales=None):
"""Creates a config specifying a bidirectional feature pyramid network.
Args:
bifpn_num_iterations: the number of top-down bottom-up feature computations
to repeat in the BiFPN.
bifpn_num_filters: the number of filters (channels) for every feature map
used in the BiFPN.
fpn_min_level: the minimum pyramid level (highest feature map resolution) to
use in the BiFPN.
fpn_max_level: the maximum pyramid level (lowest feature map resolution) to
use in the BiFPN.
input_max_level: the maximum pyramid level that will be provided as input to
the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels
from input_max_level, up to the desired fpn_max_level.
bifpn_node_params: If not 'None', a dictionary of additional default BiFPN
node parameters that will be applied to all BiFPN nodes.
level_scales: a list of pyramid level scale factors. If 'None', each level's
scale is set to 2^level by default, which corresponds to each successive
feature map scaling by a factor of 2.
Returns:
A list of dictionaries used to define nodes in the BiFPN computation graph,
as proposed by EfficientDet, Tan et al (https://arxiv.org/abs/1911.09070).
Each node's entry has the corresponding keys:
name: String. The name of this node in the BiFPN. The node name follows
the format '{bifpn_iteration}_{dn|up}_lvl_{pyramid_level}', where 'dn'
or 'up' refers to whether the node is in the top-down or bottom-up
portion of a single BiFPN iteration.
scale: the scale factor for this node, by default 2^level.
inputs: A list of names of nodes which are inputs to this node.
num_channels: The number of channels for this node.
combine_method: String. Name of the method used to combine input
node feature maps, 'fast_attention' by default for nodes which have more
than one input, and 'None' for nodes with only one input.
input_op: A (partial) function which is called to construct the layers
that will be applied to this BiFPN node's inputs. This function is
called with the arguments:
input_op(name, input_scale, input_num_channels, output_scale,
output_num_channels, conv_hyperparams, is_training,
freeze_batchnorm)
post_combine_op: A (partial) function which is called to construct the
layers that will be applied to the result of the combine operation for
this BiFPN node. This function will be called with the arguments:
post_combine_op(name, conv_hyperparams, is_training, freeze_batchnorm)
If 'None', then no layers will be applied after the combine operation
for this node.
"""
if not level_scales:
level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)]
default_node_params = {
'num_channels':
bifpn_num_filters,
'combine_method':
'fast_attention',
'input_op':
functools.partial(
_create_bifpn_resample_block, downsample_method='max_pooling'),
'post_combine_op':
functools.partial(
bifpn_utils.create_conv_block,
num_filters=bifpn_num_filters,
kernel_size=3,
strides=1,
padding='SAME',
use_separable=True,
apply_batchnorm=True,
apply_activation=True,
conv_bn_act_pattern=False),
}
if bifpn_node_params:
default_node_params.update(bifpn_node_params)
bifpn_node_params = []
# Create additional base pyramid levels not provided as input to the BiFPN.
# Note, combine_method and post_combine_op are set to None for additional
# base pyramid levels because they do not combine multiple input BiFPN nodes.
for i in range(input_max_level + 1, fpn_max_level + 1):
node_params = dict(default_node_params)
node_params.update({
'name': '0_up_lvl_{}'.format(i),
'scale': level_scales[i - fpn_min_level],
'inputs': ['0_up_lvl_{}'.format(i - 1)],
'combine_method': None,
'post_combine_op': None,
})
bifpn_node_params.append(node_params)
for i in range(bifpn_num_iterations):
# The first bottom-up feature pyramid (which includes the input pyramid
# levels from the backbone network and the additional base pyramid levels)
# is indexed at 0. So, the first top-down bottom-up pass of the BiFPN is
# indexed from 1, and repeated for bifpn_num_iterations iterations.
bifpn_i = i + 1
# Create top-down nodes.
for level_i in reversed(range(fpn_min_level, fpn_max_level)):
inputs = []
# BiFPN nodes in the top-down pass receive input from the corresponding
# level from the previous BiFPN iteration's bottom-up pass, except for the
# bottom-most (min) level node, which is computed once in the initial
# bottom-up pass, and is afterwards only computed in each top-down pass.
if level_i > fpn_min_level or bifpn_i == 1:
inputs.append('{}_up_lvl_{}'.format(bifpn_i - 1, level_i))
else:
inputs.append('{}_dn_lvl_{}'.format(bifpn_i - 1, level_i))
inputs.append(bifpn_node_params[-1]['name'])
node_params = dict(default_node_params)
node_params.update({
'name': '{}_dn_lvl_{}'.format(bifpn_i, level_i),
'scale': level_scales[level_i - fpn_min_level],
'inputs': inputs
})
bifpn_node_params.append(node_params)
# Create bottom-up nodes.
for level_i in range(fpn_min_level + 1, fpn_max_level + 1):
# BiFPN nodes in the bottom-up pass receive input from the corresponding
# level from the preceding top-down pass, except for the top (max) level
# which does not have a corresponding node in the top-down pass.
inputs = ['{}_up_lvl_{}'.format(bifpn_i - 1, level_i)]
if level_i < fpn_max_level:
inputs.append('{}_dn_lvl_{}'.format(bifpn_i, level_i))
inputs.append(bifpn_node_params[-1]['name'])
node_params = dict(default_node_params)
node_params.update({
'name': '{}_up_lvl_{}'.format(bifpn_i, level_i),
'scale': level_scales[level_i - fpn_min_level],
'inputs': inputs
})
bifpn_node_params.append(node_params)
return bifpn_node_params
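# For example, with bifpn_num_iterations=1, fpn_min_level=3, fpn_max_level=7,
# and input_max_level=5, the nodes created above are named, in order:
# ['0_up_lvl_6', '0_up_lvl_7',  # additional base pyramid levels
#  '1_dn_lvl_6', '1_dn_lvl_5', '1_dn_lvl_4', '1_dn_lvl_3',  # top-down pass
#  '1_up_lvl_4', '1_up_lvl_5', '1_up_lvl_6', '1_up_lvl_7']  # bottom-up pass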
def _create_bifpn_resample_block(name,
input_scale,
input_num_channels,
output_scale,
output_num_channels,
conv_hyperparams,
is_training,
freeze_batchnorm,
downsample_method=None,
use_native_resize_op=False,
maybe_apply_1x1_conv=True,
apply_1x1_pre_sampling=True,
apply_1x1_post_sampling=False):
"""Creates resample block layers for input feature maps to BiFPN nodes.
Args:
name: String. Name used for this block of layers.
input_scale: Scale factor of the input feature map.
input_num_channels: Number of channels in the input feature map.
output_scale: Scale factor of the output feature map.
output_num_channels: Number of channels in the output feature map.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Indicates whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
downsample_method: String. Method to use when downsampling feature maps.
use_native_resize_op: Bool. Whether to use the native resize op when
upsampling feature maps.
maybe_apply_1x1_conv: Bool. If 'True', a 1x1 convolution will only be
applied if the input_num_channels differs from the output_num_channels.
apply_1x1_pre_sampling: Bool. Whether a 1x1 convolution will be applied to
the input feature map before the up/down-sampling operation.
apply_1x1_post_sampling: Bool. Whether a 1x1 convolution will be applied to
the input feature map after the up/down-sampling operation.
Returns:
A list of layers which may be applied to the input feature maps in order to
compute feature maps with the specified scale and number of channels.
"""
# By default, 1x1 convolutions are only applied before sampling when the
# number of input and output channels differ.
if maybe_apply_1x1_conv and output_num_channels == input_num_channels:
apply_1x1_pre_sampling = False
apply_1x1_post_sampling = False
apply_bn_for_resampling = True
layers = []
if apply_1x1_pre_sampling:
layers.extend(
bifpn_utils.create_conv_block(
name=name + '1x1_pre_sample/',
num_filters=output_num_channels,
kernel_size=1,
strides=1,
padding='SAME',
use_separable=False,
apply_batchnorm=apply_bn_for_resampling,
apply_activation=False,
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm))
layers.extend(
bifpn_utils.create_resample_feature_map_ops(input_scale, output_scale,
downsample_method,
use_native_resize_op,
conv_hyperparams, is_training,
freeze_batchnorm, name))
if apply_1x1_post_sampling:
layers.extend(
bifpn_utils.create_conv_block(
name=name + '1x1_post_sample/',
num_filters=output_num_channels,
kernel_size=1,
strides=1,
padding='SAME',
use_separable=False,
apply_batchnorm=apply_bn_for_resampling,
apply_activation=False,
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm))
return layers
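# For example, resampling a 256-channel input at scale 8 to a 128-channel
# node at scale 16 applies a 1x1 conv before sampling (since the channel
# counts differ), followed by downsampling ops (max pooling under the default
# above); when the channel counts already match, both 1x1 convs are skipped.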
def _create_bifpn_combine_op(num_inputs, name, combine_method):
"""Creates a BiFPN output config, a list of the output BiFPN node names.
Args:
num_inputs: The number of inputs to this combine operation.
name: String. The name of this combine operation.
combine_method: String. The method used to combine input feature maps.
Returns:
A function which may be called with a list of num_inputs feature maps
and which will return a single feature map.
"""
combine_op = None
if num_inputs < 1:
raise ValueError('Expected at least 1 input for BiFPN combine.')
elif num_inputs == 1:
combine_op = lambda x: x[0]
else:
combine_op = bifpn_utils.BiFPNCombineLayer(
combine_method=combine_method, name=name)
return combine_op
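# For example, _create_bifpn_combine_op(1, 'node/combine', None) returns a
# function that simply unwraps the single feature map from its input list,
# while two or more inputs yield a bifpn_utils.BiFPNCombineLayer using the
# given combine_method (e.g. 'fast_attention').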
class KerasBiFpnFeatureMaps(tf.keras.Model):
"""Generates Keras based BiFPN feature maps from an input feature map pyramid.
A Keras model that generates multi-scale feature maps for detection by
iteratively computing top-down and bottom-up feature pyramids, as in the
EfficientDet paper by Tan et al, see arxiv.org/abs/1911.09070 for details.
"""
def __init__(self,
bifpn_num_iterations,
bifpn_num_filters,
fpn_min_level,
fpn_max_level,
input_max_level,
is_training,
conv_hyperparams,
freeze_batchnorm,
bifpn_node_params=None,
name=None):
"""Constructor.
Args:
bifpn_num_iterations: The number of top-down bottom-up iterations.
bifpn_num_filters: The number of filters (channels) to be used for all
feature maps in this BiFPN.
fpn_min_level: The minimum pyramid level (highest feature map resolution)
to use in the BiFPN.
fpn_max_level: The maximum pyramid level (lowest feature map resolution)
to use in the BiFPN.
input_max_level: The maximum pyramid level that will be provided as input
to the BiFPN. Accordingly, the BiFPN will compute any additional pyramid
levels from input_max_level up to the desired fpn_max_level, with each
successive level downsampling by a scale factor of 2 by default.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
bifpn_node_params: An optional dictionary that may be used to specify
default parameters for BiFPN nodes, without the need to provide a custom
bifpn_node_config. For example, if '{ combine_method: 'sum' }', then all
BiFPN nodes will combine input feature maps by summation, rather than
by the default fast attention method.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasBiFpnFeatureMaps, self).__init__(name=name)
bifpn_node_config = _create_bifpn_node_config(
bifpn_num_iterations, bifpn_num_filters, fpn_min_level, fpn_max_level,
input_max_level, bifpn_node_params)
bifpn_input_config = _create_bifpn_input_config(
fpn_min_level, fpn_max_level, input_max_level)
bifpn_output_node_names = _get_bifpn_output_node_names(
fpn_min_level, fpn_max_level, bifpn_node_config)
self.bifpn_node_config = bifpn_node_config
self.bifpn_output_node_names = bifpn_output_node_names
self.node_input_blocks = []
self.node_combine_op = []
self.node_post_combine_block = []
all_node_params = bifpn_input_config
all_node_names = [node['name'] for node in all_node_params]
for node_config in bifpn_node_config:
# Maybe transform and/or resample input feature maps.
input_blocks = []
for input_name in node_config['inputs']:
if input_name not in all_node_names:
raise ValueError(
'Input feature map ({}) does not exist.'.format(input_name))
input_index = all_node_names.index(input_name)
input_params = all_node_params[input_index]
input_block = node_config['input_op'](
name='{}/input_{}/'.format(node_config['name'], input_name),
input_scale=input_params['scale'],
input_num_channels=input_params.get('num_channels', None),
output_scale=node_config['scale'],
output_num_channels=node_config['num_channels'],
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm)
input_blocks.append((input_index, input_block))
# Combine input feature maps.
combine_op = _create_bifpn_combine_op(
num_inputs=len(input_blocks),
name=(node_config['name'] + '/combine'),
combine_method=node_config['combine_method'])
# Post-combine layers.
post_combine_block = []
if node_config['post_combine_op']:
post_combine_block.extend(node_config['post_combine_op'](
name=node_config['name'] + '/post_combine/',
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm))
self.node_input_blocks.append(input_blocks)
self.node_combine_op.append(combine_op)
self.node_post_combine_block.append(post_combine_block)
all_node_params.append(node_config)
all_node_names.append(node_config['name'])
def call(self, feature_pyramid):
"""Compute BiFPN feature maps from input feature pyramid.
Executed when calling the `.__call__` method on input.
Args:
feature_pyramid: list of tuples of (tensor_name, image_feature_tensor).
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
feature_maps = [el[1] for el in feature_pyramid]
output_feature_maps = [None for node in self.bifpn_output_node_names]
for index, node in enumerate(self.bifpn_node_config):
node_scope = 'node_{:02d}'.format(index)
with tf.name_scope(node_scope):
# Apply layer blocks to this node's input feature maps.
input_block_results = []
for input_index, input_block in self.node_input_blocks[index]:
block_result = feature_maps[input_index]
for layer in input_block:
block_result = layer(block_result)
input_block_results.append(block_result)
# Combine the resulting feature maps.
node_result = self.node_combine_op[index](input_block_results)
# Apply post-combine layer block if applicable.
for layer in self.node_post_combine_block[index]:
node_result = layer(node_result)
feature_maps.append(node_result)
if node['name'] in self.bifpn_output_node_names:
index = self.bifpn_output_node_names.index(node['name'])
output_feature_maps[index] = node_result
return collections.OrderedDict(
zip(self.bifpn_output_node_names, output_feature_maps))
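# A minimal usage sketch, mirroring the unit test in the companion test file;
# conv_hyperparams is assumed to be a
# hyperparams_builder.KerasLayerHyperparams instance.
#
# image_features = [
#     ('block3', tf.random.uniform([4, 16, 16, 256])),
#     ('block4', tf.random.uniform([4, 8, 8, 256])),
#     ('block5', tf.random.uniform([4, 4, 4, 256]))]
# bifpn = KerasBiFpnFeatureMaps(
#     bifpn_num_iterations=2, bifpn_num_filters=128, fpn_min_level=3,
#     fpn_max_level=7, input_max_level=5, is_training=True,
#     conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, name='bifpn')
# feature_maps = bifpn(image_features)
# # -> OrderedDict with keys '2_dn_lvl_3', '2_up_lvl_4', ..., '2_up_lvl_7'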
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bidirectional feature pyramid generators."""
import unittest
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
@parameterized.parameters({'bifpn_num_iterations': 2},
{'bifpn_num_iterations': 8})
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class BiFPNFeaturePyramidGeneratorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
force_use_bias: true
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_get_expected_feature_map_shapes(self, bifpn_num_iterations):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32))
]
bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps(
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=128,
fpn_min_level=3,
fpn_max_level=7,
input_max_level=5,
is_training=True,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False)
def graph_fn():
feature_maps = bifpn_generator(image_features)
return feature_maps
expected_feature_map_shapes = {
'{}_dn_lvl_3'.format(bifpn_num_iterations): (4, 16, 16, 128),
'{}_up_lvl_4'.format(bifpn_num_iterations): (4, 8, 8, 128),
'{}_up_lvl_5'.format(bifpn_num_iterations): (4, 4, 4, 128),
'{}_up_lvl_6'.format(bifpn_num_iterations): (4, 2, 2, 128),
'{}_up_lvl_7'.format(bifpn_num_iterations): (4, 1, 1, 128)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_variable_names(self, bifpn_num_iterations):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32))
]
bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps(
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=128,
fpn_min_level=3,
fpn_max_level=7,
input_max_level=5,
is_training=True,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
name='bifpn')
def graph_fn():
return bifpn_generator(image_features)
self.execute(graph_fn, [], g)
expected_variables = [
'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/bias',
'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/kernel',
'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias',
'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel',
'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias',
'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel',
'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/bias',
'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/kernel',
'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias',
'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel',
'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias',
'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel']
expected_node_variable_patterns = [
['bifpn/node_{:02}/{}_dn_lvl_6/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_dn_lvl_5/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_dn_lvl_4/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_dn_lvl_3/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_up_lvl_4/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_up_lvl_5/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_up_lvl_6/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_up_lvl_7/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/pointwise_kernel']]
node_i = 2
for iter_i in range(1, bifpn_num_iterations+1):
for node_variable_patterns in expected_node_variable_patterns:
for pattern in node_variable_patterns:
expected_variables.append(pattern.format(node_i, iter_i))
node_i += 1
expected_variables = set(expected_variables)
actual_variable_set = set(
[var.name.split(':')[0] for var in bifpn_generator.variables])
self.assertSetEqual(expected_variables, actual_variable_set)
# TODO(aom): Tests for create_bifpn_combine_op.
if __name__ == '__main__':
tf.test.main()
...@@ -59,6 +59,7 @@ class FasterRCNNInceptionResnetV2KerasFeatureExtractor(
is_training, first_stage_features_stride, batch_norm_trainable,
weight_decay)
self._variable_dict = {}
self.classification_backbone = None
def preprocess(self, resized_inputs):
"""Faster R-CNN with Inception Resnet v2 preprocessing.
...@@ -95,19 +96,20 @@ class FasterRCNNInceptionResnetV2KerasFeatureExtractor(
And returns rpn_feature_map:
A tensor with shape [batch, height, width, depth]
"""
with tf.name_scope(name): if not self.classification_backbone:
with tf.name_scope('InceptionResnetV2'): self.classification_backbone = inception_resnet_v2.inception_resnet_v2(
model = inception_resnet_v2.inception_resnet_v2(
self._train_batch_norm,
output_stride=self._first_stage_features_stride,
align_feature_maps=True,
weight_decay=self._weight_decay,
weights=None,
include_top=False)
proposal_features = model.get_layer( with tf.name_scope(name):
with tf.name_scope('InceptionResnetV2'):
proposal_features = self.classification_backbone.get_layer(
name='block17_20_ac').output
keras_model = tf.keras.Model(
inputs=model.inputs, inputs=self.classification_backbone.inputs,
outputs=proposal_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
...@@ -132,962 +134,26 @@ class FasterRCNNInceptionResnetV2KerasFeatureExtractor(
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
if not self.classification_backbone:
self.classification_backbone = inception_resnet_v2.inception_resnet_v2(
self._train_batch_norm,
output_stride=self._first_stage_features_stride,
align_feature_maps=True,
weight_decay=self._weight_decay,
weights=None,
include_top=False)
with tf.name_scope(name):
with tf.name_scope('InceptionResnetV2'):
model = inception_resnet_v2.inception_resnet_v2( proposal_feature_maps = self.classification_backbone.get_layer(
self._train_batch_norm,
output_stride=16,
align_feature_maps=False,
weight_decay=self._weight_decay,
weights=None,
include_top=False)
proposal_feature_maps = model.get_layer(
name='block17_20_ac').output
proposal_classifier_features = model.get_layer( proposal_classifier_features = self.classification_backbone.get_layer(
name='conv_7b_ac').output
keras_model = model_util.extract_submodel(
model=model, model=self.classification_backbone,
inputs=proposal_feature_maps,
outputs=proposal_classifier_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
def restore_from_classification_checkpoint_fn(
self,
first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
This uses a hard-coded conversion to load into Keras from a slim-trained
inception_resnet_v2 checkpoint.
Note that this overrides the default implementation in
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor which does not work
for InceptionResnetV2 checkpoints.
Args:
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor.
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
keras_to_slim_name_mapping = {
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d/kernel': 'InceptionResnetV2/Conv2d_1a_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm/beta': 'InceptionResnetV2/Conv2d_1a_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm/moving_mean': 'InceptionResnetV2/Conv2d_1a_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm/moving_variance': 'InceptionResnetV2/Conv2d_1a_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_1/kernel': 'InceptionResnetV2/Conv2d_2a_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_1/beta': 'InceptionResnetV2/Conv2d_2a_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_1/moving_mean': 'InceptionResnetV2/Conv2d_2a_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_1/moving_variance': 'InceptionResnetV2/Conv2d_2a_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_2/kernel': 'InceptionResnetV2/Conv2d_2b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_2/beta': 'InceptionResnetV2/Conv2d_2b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_2/moving_mean': 'InceptionResnetV2/Conv2d_2b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_2/moving_variance': 'InceptionResnetV2/Conv2d_2b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_3/kernel': 'InceptionResnetV2/Conv2d_3b_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_3/beta': 'InceptionResnetV2/Conv2d_3b_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_3/moving_mean': 'InceptionResnetV2/Conv2d_3b_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_3/moving_variance': 'InceptionResnetV2/Conv2d_3b_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_4/kernel': 'InceptionResnetV2/Conv2d_4a_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_4/beta': 'InceptionResnetV2/Conv2d_4a_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_4/moving_mean': 'InceptionResnetV2/Conv2d_4a_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_4/moving_variance': 'InceptionResnetV2/Conv2d_4a_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_5/kernel': 'InceptionResnetV2/Mixed_5b/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_5/beta': 'InceptionResnetV2/Mixed_5b/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_5/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_5/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_6/kernel': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_6/beta': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_6/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_6/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_7/kernel': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0b_5x5/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_7/beta': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0b_5x5/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_7/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0b_5x5/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_7/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_1/Conv2d_0b_5x5/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_8/kernel': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_8/beta': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_8/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_8/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_9/kernel': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_9/beta': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_9/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_9/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_10/kernel': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_10/beta': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_10/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_10/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_11/kernel': 'InceptionResnetV2/Mixed_5b/Branch_3/Conv2d_0b_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_11/beta': 'InceptionResnetV2/Mixed_5b/Branch_3/Conv2d_0b_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_11/moving_mean': 'InceptionResnetV2/Mixed_5b/Branch_3/Conv2d_0b_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_11/moving_variance': 'InceptionResnetV2/Mixed_5b/Branch_3/Conv2d_0b_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_12/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_12/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_12/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_12/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_13/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_13/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_13/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_13/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_14/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_14/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_14/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_14/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_15/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_15/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_15/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_15/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_16/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_16/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_16/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_16/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_17/kernel': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_17/beta': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_17/moving_mean': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_17/moving_variance': 'InceptionResnetV2/Repeat/block35_1/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_1_conv/kernel': 'InceptionResnetV2/Repeat/block35_1/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_1_conv/bias': 'InceptionResnetV2/Repeat/block35_1/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_18/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_18/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_18/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_18/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_19/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_19/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_19/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_19/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_20/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_20/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_20/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_20/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_21/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_21/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_21/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_21/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_22/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_22/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_22/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_22/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_23/kernel': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_23/beta': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_23/moving_mean': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_23/moving_variance': 'InceptionResnetV2/Repeat/block35_2/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_2_conv/kernel': 'InceptionResnetV2/Repeat/block35_2/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_2_conv/bias': 'InceptionResnetV2/Repeat/block35_2/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_24/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_24/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_24/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_24/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_25/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_25/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_25/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_25/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_26/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_26/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_26/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_26/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_27/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_27/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_27/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_27/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_28/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_28/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_28/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_28/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_29/kernel': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_29/beta': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_29/moving_mean': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_29/moving_variance': 'InceptionResnetV2/Repeat/block35_3/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_3_conv/kernel': 'InceptionResnetV2/Repeat/block35_3/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_3_conv/bias': 'InceptionResnetV2/Repeat/block35_3/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_30/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_30/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_30/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_30/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_31/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_31/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_31/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_31/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_32/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_32/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_32/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_32/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_33/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_33/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_33/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_33/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_34/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_34/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_34/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_34/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_35/kernel': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_35/beta': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_35/moving_mean': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_35/moving_variance': 'InceptionResnetV2/Repeat/block35_4/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_4_conv/kernel': 'InceptionResnetV2/Repeat/block35_4/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_4_conv/bias': 'InceptionResnetV2/Repeat/block35_4/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_36/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_36/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_36/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_36/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_37/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_37/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_37/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_37/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_38/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_38/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_38/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_38/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_39/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_39/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_39/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_39/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_40/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_40/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_40/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_40/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_41/kernel': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_41/beta': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_41/moving_mean': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_41/moving_variance': 'InceptionResnetV2/Repeat/block35_5/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_5_conv/kernel': 'InceptionResnetV2/Repeat/block35_5/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_5_conv/bias': 'InceptionResnetV2/Repeat/block35_5/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_42/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_42/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_42/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_42/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_43/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_43/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_43/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_43/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_44/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_44/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_44/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_44/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_45/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_45/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_45/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_45/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_46/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_46/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_46/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_46/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_47/kernel': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_47/beta': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_47/moving_mean': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_47/moving_variance': 'InceptionResnetV2/Repeat/block35_6/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_6_conv/kernel': 'InceptionResnetV2/Repeat/block35_6/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_6_conv/bias': 'InceptionResnetV2/Repeat/block35_6/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_48/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_48/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_48/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_48/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_49/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_49/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_49/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_49/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_50/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_50/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_50/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_50/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_51/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_51/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_51/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_51/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_52/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_52/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_52/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_52/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_53/kernel': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_53/beta': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_53/moving_mean': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_53/moving_variance': 'InceptionResnetV2/Repeat/block35_7/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_7_conv/kernel': 'InceptionResnetV2/Repeat/block35_7/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_7_conv/bias': 'InceptionResnetV2/Repeat/block35_7/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_54/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_54/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_54/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_54/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_55/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_55/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_55/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_55/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_56/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_56/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_56/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_56/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_57/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_57/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_57/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_57/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_58/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_58/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_58/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_58/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_59/kernel': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_59/beta': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_59/moving_mean': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_59/moving_variance': 'InceptionResnetV2/Repeat/block35_8/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_8_conv/kernel': 'InceptionResnetV2/Repeat/block35_8/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_8_conv/bias': 'InceptionResnetV2/Repeat/block35_8/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_60/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_60/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_60/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_60/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_61/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_61/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_61/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_61/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_62/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_62/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_62/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_62/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_63/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_63/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_63/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_63/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_64/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_64/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_64/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_64/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_65/kernel': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_65/beta': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_65/moving_mean': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_65/moving_variance': 'InceptionResnetV2/Repeat/block35_9/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_9_conv/kernel': 'InceptionResnetV2/Repeat/block35_9/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_9_conv/bias': 'InceptionResnetV2/Repeat/block35_9/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_66/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_66/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_66/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_66/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_67/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_67/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_67/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_67/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_68/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_68/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_68/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_68/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_69/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_69/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_69/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_69/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_70/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_70/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_70/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_70/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_71/kernel': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0c_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_71/beta': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0c_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_71/moving_mean': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_71/moving_variance': 'InceptionResnetV2/Repeat/block35_10/Branch_2/Conv2d_0c_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_10_conv/kernel': 'InceptionResnetV2/Repeat/block35_10/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block35_10_conv/bias': 'InceptionResnetV2/Repeat/block35_10/Conv2d_1x1/biases',
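# Mixed_6a is the Reduction-A module (35x35 -> 17x17 grid): four
# conv/batch-norm layers (Branch_0: one 3x3 stride-2 conv; Branch_1: 1x1,
# 3x3, 3x3 stride-2) and no residual projection conv.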
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_72/kernel': 'InceptionResnetV2/Mixed_6a/Branch_0/Conv2d_1a_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_72/beta': 'InceptionResnetV2/Mixed_6a/Branch_0/Conv2d_1a_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_72/moving_mean': 'InceptionResnetV2/Mixed_6a/Branch_0/Conv2d_1a_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_72/moving_variance': 'InceptionResnetV2/Mixed_6a/Branch_0/Conv2d_1a_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_73/kernel': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_73/beta': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_73/moving_mean': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_73/moving_variance': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_74/kernel': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0b_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_74/beta': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0b_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_74/moving_mean': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_74/moving_variance': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_0b_3x3/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_75/kernel': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_1a_3x3/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_75/beta': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_1a_3x3/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_75/moving_mean': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_1a_3x3/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_75/moving_variance': 'InceptionResnetV2/Mixed_6a/Branch_1/Conv2d_1a_3x3/BatchNorm/moving_variance',
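# Repeat_1 scope: block17 (Inception-B) modules on the 17x17 grid. Each maps
# four conv/batch-norm pairs (Branch_0: 1x1; Branch_1: 1x1, 1x7, 7x1) plus
# the biased 1x1 projection conv (block17_N_conv).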
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_76/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_76/beta': 'InceptionResnetV2/Repeat_1/block17_1/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_76/moving_mean': 'InceptionResnetV2/Repeat_1/block17_1/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_76/moving_variance': 'InceptionResnetV2/Repeat_1/block17_1/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_77/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_77/beta': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_77/moving_mean': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_77/moving_variance': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_78/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_78/beta': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_78/moving_mean': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_78/moving_variance': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_79/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_79/beta': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_79/moving_mean': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_79/moving_variance': 'InceptionResnetV2/Repeat_1/block17_1/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_1_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_1/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_1_conv/bias': 'InceptionResnetV2/Repeat_1/block17_1/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_80/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_80/beta': 'InceptionResnetV2/Repeat_1/block17_2/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_80/moving_mean': 'InceptionResnetV2/Repeat_1/block17_2/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_80/moving_variance': 'InceptionResnetV2/Repeat_1/block17_2/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_81/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_81/beta': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_81/moving_mean': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_81/moving_variance': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_82/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_82/beta': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_82/moving_mean': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_82/moving_variance': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_83/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_83/beta': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_83/moving_mean': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_83/moving_variance': 'InceptionResnetV2/Repeat_1/block17_2/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_2_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_2/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_2_conv/bias': 'InceptionResnetV2/Repeat_1/block17_2/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_84/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_84/beta': 'InceptionResnetV2/Repeat_1/block17_3/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_84/moving_mean': 'InceptionResnetV2/Repeat_1/block17_3/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_84/moving_variance': 'InceptionResnetV2/Repeat_1/block17_3/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_85/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_85/beta': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_85/moving_mean': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_85/moving_variance': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_86/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_86/beta': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_86/moving_mean': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_86/moving_variance': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_87/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_87/beta': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_87/moving_mean': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_87/moving_variance': 'InceptionResnetV2/Repeat_1/block17_3/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_3_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_3/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_3_conv/bias': 'InceptionResnetV2/Repeat_1/block17_3/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_88/kernel': 'InceptionResnetV2/Repeat_1/block17_4/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_88/beta': 'InceptionResnetV2/Repeat_1/block17_4/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_88/moving_mean': 'InceptionResnetV2/Repeat_1/block17_4/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_88/moving_variance': 'InceptionResnetV2/Repeat_1/block17_4/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_89/kernel': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_89/beta': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_89/moving_mean': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_89/moving_variance': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_90/kernel': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_90/beta': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_90/moving_mean': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_90/moving_variance': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_91/kernel': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_91/beta': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_91/moving_mean': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_91/moving_variance': 'InceptionResnetV2/Repeat_1/block17_4/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_4_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_4/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_4_conv/bias': 'InceptionResnetV2/Repeat_1/block17_4/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_92/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_92/beta': 'InceptionResnetV2/Repeat_1/block17_5/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_92/moving_mean': 'InceptionResnetV2/Repeat_1/block17_5/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_92/moving_variance': 'InceptionResnetV2/Repeat_1/block17_5/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_93/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_93/beta': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_93/moving_mean': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_93/moving_variance': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_94/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_94/beta': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_94/moving_mean': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_94/moving_variance': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_95/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_95/beta': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_95/moving_mean': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_95/moving_variance': 'InceptionResnetV2/Repeat_1/block17_5/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_5_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_5/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_5_conv/bias': 'InceptionResnetV2/Repeat_1/block17_5/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_96/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_96/beta': 'InceptionResnetV2/Repeat_1/block17_6/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_96/moving_mean': 'InceptionResnetV2/Repeat_1/block17_6/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_96/moving_variance': 'InceptionResnetV2/Repeat_1/block17_6/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_97/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_97/beta': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_97/moving_mean': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_97/moving_variance': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_98/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_98/beta': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_98/moving_mean': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_98/moving_variance': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_99/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_99/beta': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_99/moving_mean': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_99/moving_variance': 'InceptionResnetV2/Repeat_1/block17_6/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_6_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_6/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_6_conv/bias': 'InceptionResnetV2/Repeat_1/block17_6/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_100/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_100/beta': 'InceptionResnetV2/Repeat_1/block17_7/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_100/moving_mean': 'InceptionResnetV2/Repeat_1/block17_7/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_100/moving_variance': 'InceptionResnetV2/Repeat_1/block17_7/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_101/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_101/beta': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_101/moving_mean': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_101/moving_variance': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_102/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_102/beta': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_102/moving_mean': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_102/moving_variance': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_103/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_103/beta': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_103/moving_mean': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_103/moving_variance': 'InceptionResnetV2/Repeat_1/block17_7/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_7_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_7/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_7_conv/bias': 'InceptionResnetV2/Repeat_1/block17_7/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_104/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_104/beta': 'InceptionResnetV2/Repeat_1/block17_8/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_104/moving_mean': 'InceptionResnetV2/Repeat_1/block17_8/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_104/moving_variance': 'InceptionResnetV2/Repeat_1/block17_8/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_105/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_105/beta': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_105/moving_mean': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_105/moving_variance': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_106/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_106/beta': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_106/moving_mean': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_106/moving_variance': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_107/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_107/beta': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_107/moving_mean': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_107/moving_variance': 'InceptionResnetV2/Repeat_1/block17_8/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_8_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_8/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_8_conv/bias': 'InceptionResnetV2/Repeat_1/block17_8/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_108/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_108/beta': 'InceptionResnetV2/Repeat_1/block17_9/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_108/moving_mean': 'InceptionResnetV2/Repeat_1/block17_9/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_108/moving_variance': 'InceptionResnetV2/Repeat_1/block17_9/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_109/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_109/beta': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_109/moving_mean': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_109/moving_variance': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_110/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_110/beta': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_110/moving_mean': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_110/moving_variance': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_111/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_111/beta': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_111/moving_mean': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_111/moving_variance': 'InceptionResnetV2/Repeat_1/block17_9/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_9_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_9/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_9_conv/bias': 'InceptionResnetV2/Repeat_1/block17_9/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_112/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_112/beta': 'InceptionResnetV2/Repeat_1/block17_10/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_112/moving_mean': 'InceptionResnetV2/Repeat_1/block17_10/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_112/moving_variance': 'InceptionResnetV2/Repeat_1/block17_10/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_113/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_113/beta': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_113/moving_mean': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_113/moving_variance': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_114/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_114/beta': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_114/moving_mean': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_114/moving_variance': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_115/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_115/beta': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_115/moving_mean': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_115/moving_variance': 'InceptionResnetV2/Repeat_1/block17_10/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_10_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_10/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_10_conv/bias': 'InceptionResnetV2/Repeat_1/block17_10/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_116/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_116/beta': 'InceptionResnetV2/Repeat_1/block17_11/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_116/moving_mean': 'InceptionResnetV2/Repeat_1/block17_11/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_116/moving_variance': 'InceptionResnetV2/Repeat_1/block17_11/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_117/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_117/beta': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_117/moving_mean': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_117/moving_variance': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_118/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_118/beta': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_118/moving_mean': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_118/moving_variance': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_119/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_119/beta': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_119/moving_mean': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_119/moving_variance': 'InceptionResnetV2/Repeat_1/block17_11/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_11_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_11/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_11_conv/bias': 'InceptionResnetV2/Repeat_1/block17_11/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_120/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_120/beta': 'InceptionResnetV2/Repeat_1/block17_12/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_120/moving_mean': 'InceptionResnetV2/Repeat_1/block17_12/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_120/moving_variance': 'InceptionResnetV2/Repeat_1/block17_12/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_121/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_121/beta': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_121/moving_mean': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_121/moving_variance': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_122/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_122/beta': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_122/moving_mean': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_122/moving_variance': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_123/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_123/beta': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_123/moving_mean': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_123/moving_variance': 'InceptionResnetV2/Repeat_1/block17_12/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_12_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_12/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_12_conv/bias': 'InceptionResnetV2/Repeat_1/block17_12/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_124/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_124/beta': 'InceptionResnetV2/Repeat_1/block17_13/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_124/moving_mean': 'InceptionResnetV2/Repeat_1/block17_13/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_124/moving_variance': 'InceptionResnetV2/Repeat_1/block17_13/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_125/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_125/beta': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_125/moving_mean': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_125/moving_variance': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_126/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_126/beta': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_126/moving_mean': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_126/moving_variance': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_127/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_127/beta': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_127/moving_mean': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_127/moving_variance': 'InceptionResnetV2/Repeat_1/block17_13/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_13_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_13/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_13_conv/bias': 'InceptionResnetV2/Repeat_1/block17_13/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_128/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_128/beta': 'InceptionResnetV2/Repeat_1/block17_14/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_128/moving_mean': 'InceptionResnetV2/Repeat_1/block17_14/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_128/moving_variance': 'InceptionResnetV2/Repeat_1/block17_14/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_129/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_129/beta': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_129/moving_mean': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_129/moving_variance': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_130/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_130/beta': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_130/moving_mean': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_130/moving_variance': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_131/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_131/beta': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_131/moving_mean': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_131/moving_variance': 'InceptionResnetV2/Repeat_1/block17_14/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_14_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_14/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_14_conv/bias': 'InceptionResnetV2/Repeat_1/block17_14/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_132/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_132/beta': 'InceptionResnetV2/Repeat_1/block17_15/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_132/moving_mean': 'InceptionResnetV2/Repeat_1/block17_15/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_132/moving_variance': 'InceptionResnetV2/Repeat_1/block17_15/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_133/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_133/beta': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_133/moving_mean': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_133/moving_variance': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_134/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_134/beta': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_134/moving_mean': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_134/moving_variance': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_135/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_135/beta': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_135/moving_mean': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_135/moving_variance': 'InceptionResnetV2/Repeat_1/block17_15/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_15_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_15/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_15_conv/bias': 'InceptionResnetV2/Repeat_1/block17_15/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_136/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_136/beta': 'InceptionResnetV2/Repeat_1/block17_16/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_136/moving_mean': 'InceptionResnetV2/Repeat_1/block17_16/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_136/moving_variance': 'InceptionResnetV2/Repeat_1/block17_16/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_137/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_137/beta': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_137/moving_mean': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_137/moving_variance': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_138/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_138/beta': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_138/moving_mean': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_138/moving_variance': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_139/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_139/beta': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_139/moving_mean': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_139/moving_variance': 'InceptionResnetV2/Repeat_1/block17_16/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_16_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_16/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_16_conv/bias': 'InceptionResnetV2/Repeat_1/block17_16/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_140/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_140/beta': 'InceptionResnetV2/Repeat_1/block17_17/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_140/moving_mean': 'InceptionResnetV2/Repeat_1/block17_17/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_140/moving_variance': 'InceptionResnetV2/Repeat_1/block17_17/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_141/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_141/beta': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_141/moving_mean': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_141/moving_variance': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_142/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_142/beta': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_142/moving_mean': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_142/moving_variance': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_143/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_143/beta': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_143/moving_mean': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_143/moving_variance': 'InceptionResnetV2/Repeat_1/block17_17/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_17_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_17/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_17_conv/bias': 'InceptionResnetV2/Repeat_1/block17_17/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_144/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_144/beta': 'InceptionResnetV2/Repeat_1/block17_18/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_144/moving_mean': 'InceptionResnetV2/Repeat_1/block17_18/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_144/moving_variance': 'InceptionResnetV2/Repeat_1/block17_18/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_145/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_145/beta': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_145/moving_mean': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_145/moving_variance': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_146/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_146/beta': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_146/moving_mean': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_146/moving_variance': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_147/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_147/beta': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_147/moving_mean': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_147/moving_variance': 'InceptionResnetV2/Repeat_1/block17_18/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_18_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_18/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_18_conv/bias': 'InceptionResnetV2/Repeat_1/block17_18/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_148/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_148/beta': 'InceptionResnetV2/Repeat_1/block17_19/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_148/moving_mean': 'InceptionResnetV2/Repeat_1/block17_19/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_148/moving_variance': 'InceptionResnetV2/Repeat_1/block17_19/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_149/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_149/beta': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_149/moving_mean': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_149/moving_variance': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_150/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_150/beta': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_150/moving_mean': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_150/moving_variance': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_151/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_151/beta': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_151/moving_mean': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_151/moving_variance': 'InceptionResnetV2/Repeat_1/block17_19/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_19_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_19/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_19_conv/bias': 'InceptionResnetV2/Repeat_1/block17_19/Conv2d_1x1/biases',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_152/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Branch_0/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_152/beta': 'InceptionResnetV2/Repeat_1/block17_20/Branch_0/Conv2d_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_152/moving_mean': 'InceptionResnetV2/Repeat_1/block17_20/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_152/moving_variance': 'InceptionResnetV2/Repeat_1/block17_20/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_153/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0a_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_153/beta': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_153/moving_mean': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_153/moving_variance': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_154/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0b_1x7/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_154/beta': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0b_1x7/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_154/moving_mean': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_154/moving_variance': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0b_1x7/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/conv2d_155/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0c_7x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_155/beta': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0c_7x1/BatchNorm/beta',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_155/moving_mean': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_mean',
'FirstStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_155/moving_variance': 'InceptionResnetV2/Repeat_1/block17_20/Branch_1/Conv2d_0c_7x1/BatchNorm/moving_variance',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_20_conv/kernel': 'InceptionResnetV2/Repeat_1/block17_20/Conv2d_1x1/weights',
'FirstStageFeatureExtractor/InceptionResnetV2/block17_20_conv/bias': 'InceptionResnetV2/Repeat_1/block17_20/Conv2d_1x1/biases',
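    # End of the first-stage (Repeat_1/block17) mappings. The
    # SecondStageFeatureExtractor entries below cover the Mixed_7a reduction
    # block, then the Repeat_2/block8 stack and the final Block8. As in the
    # first stage, each FreezableBatchNorm maps only beta, moving_mean and
    # moving_variance -- the checkpoint has no gamma/scale parameter for
    # these layers.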
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_359/kernel': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_359/beta': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_359/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_359/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_360/kernel': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_1a_3x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_360/beta': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_1a_3x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_360/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_1a_3x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_360/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_0/Conv2d_1a_3x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_361/kernel': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_361/beta': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_361/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_361/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_362/kernel': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_1a_3x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_362/beta': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_1a_3x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_362/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_1a_3x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_362/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_1/Conv2d_1a_3x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_363/kernel': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_363/beta': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_363/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_363/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_364/kernel': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0b_3x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_364/beta': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0b_3x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_364/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_364/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_0b_3x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_365/kernel': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_1a_3x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_365/beta': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_1a_3x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_365/moving_mean': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_1a_3x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_365/moving_variance': 'InceptionResnetV2/Mixed_7a/Branch_2/Conv2d_1a_3x3/BatchNorm/moving_variance',
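    # Repeat_2 stack: block8_1 through block8_9. Each block has a Branch_0
    # 1x1 conv and a Branch_1 factorized 1x3/3x1 pair, plus a residual
    # projection (block8_N_conv) -- the only layers here that carry biases.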
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_366/kernel': 'InceptionResnetV2/Repeat_2/block8_1/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_366/beta': 'InceptionResnetV2/Repeat_2/block8_1/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_366/moving_mean': 'InceptionResnetV2/Repeat_2/block8_1/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_366/moving_variance': 'InceptionResnetV2/Repeat_2/block8_1/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_367/kernel': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_367/beta': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_367/moving_mean': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_367/moving_variance': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_368/kernel': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_368/beta': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_368/moving_mean': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_368/moving_variance': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_369/kernel': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_369/beta': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_369/moving_mean': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_369/moving_variance': 'InceptionResnetV2/Repeat_2/block8_1/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_1_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_1/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_1_conv/bias': 'InceptionResnetV2/Repeat_2/block8_1/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_370/kernel': 'InceptionResnetV2/Repeat_2/block8_2/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_370/beta': 'InceptionResnetV2/Repeat_2/block8_2/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_370/moving_mean': 'InceptionResnetV2/Repeat_2/block8_2/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_370/moving_variance': 'InceptionResnetV2/Repeat_2/block8_2/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_371/kernel': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_371/beta': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_371/moving_mean': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_371/moving_variance': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_372/kernel': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_372/beta': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_372/moving_mean': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_372/moving_variance': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_373/kernel': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_373/beta': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_373/moving_mean': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_373/moving_variance': 'InceptionResnetV2/Repeat_2/block8_2/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_2_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_2/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_2_conv/bias': 'InceptionResnetV2/Repeat_2/block8_2/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_374/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_374/beta': 'InceptionResnetV2/Repeat_2/block8_3/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_374/moving_mean': 'InceptionResnetV2/Repeat_2/block8_3/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_374/moving_variance': 'InceptionResnetV2/Repeat_2/block8_3/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_375/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_375/beta': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_375/moving_mean': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_375/moving_variance': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_376/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_376/beta': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_376/moving_mean': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_376/moving_variance': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_377/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_377/beta': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_377/moving_mean': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_377/moving_variance': 'InceptionResnetV2/Repeat_2/block8_3/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_3_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_3/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_3_conv/bias': 'InceptionResnetV2/Repeat_2/block8_3/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_378/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_378/beta': 'InceptionResnetV2/Repeat_2/block8_4/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_378/moving_mean': 'InceptionResnetV2/Repeat_2/block8_4/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_378/moving_variance': 'InceptionResnetV2/Repeat_2/block8_4/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_379/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_379/beta': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_379/moving_mean': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_379/moving_variance': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_380/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_380/beta': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_380/moving_mean': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_380/moving_variance': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_381/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_381/beta': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_381/moving_mean': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_381/moving_variance': 'InceptionResnetV2/Repeat_2/block8_4/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_4_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_4/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_4_conv/bias': 'InceptionResnetV2/Repeat_2/block8_4/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_382/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_382/beta': 'InceptionResnetV2/Repeat_2/block8_5/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_382/moving_mean': 'InceptionResnetV2/Repeat_2/block8_5/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_382/moving_variance': 'InceptionResnetV2/Repeat_2/block8_5/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_383/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_383/beta': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_383/moving_mean': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_383/moving_variance': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_384/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_384/beta': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_384/moving_mean': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_384/moving_variance': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_385/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_385/beta': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_385/moving_mean': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_385/moving_variance': 'InceptionResnetV2/Repeat_2/block8_5/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_5_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_5/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_5_conv/bias': 'InceptionResnetV2/Repeat_2/block8_5/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_386/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_386/beta': 'InceptionResnetV2/Repeat_2/block8_6/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_386/moving_mean': 'InceptionResnetV2/Repeat_2/block8_6/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_386/moving_variance': 'InceptionResnetV2/Repeat_2/block8_6/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_387/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_387/beta': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_387/moving_mean': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_387/moving_variance': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_388/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_388/beta': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_388/moving_mean': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_388/moving_variance': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_389/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_389/beta': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_389/moving_mean': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_389/moving_variance': 'InceptionResnetV2/Repeat_2/block8_6/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_6_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_6/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_6_conv/bias': 'InceptionResnetV2/Repeat_2/block8_6/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_390/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_390/beta': 'InceptionResnetV2/Repeat_2/block8_7/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_390/moving_mean': 'InceptionResnetV2/Repeat_2/block8_7/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_390/moving_variance': 'InceptionResnetV2/Repeat_2/block8_7/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_391/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_391/beta': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_391/moving_mean': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_391/moving_variance': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_392/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_392/beta': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_392/moving_mean': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_392/moving_variance': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_393/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_393/beta': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_393/moving_mean': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_393/moving_variance': 'InceptionResnetV2/Repeat_2/block8_7/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_7_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_7/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_7_conv/bias': 'InceptionResnetV2/Repeat_2/block8_7/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_394/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_394/beta': 'InceptionResnetV2/Repeat_2/block8_8/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_394/moving_mean': 'InceptionResnetV2/Repeat_2/block8_8/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_394/moving_variance': 'InceptionResnetV2/Repeat_2/block8_8/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_395/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_395/beta': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_395/moving_mean': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_395/moving_variance': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_396/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_396/beta': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_396/moving_mean': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_396/moving_variance': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_397/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_397/beta': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_397/moving_mean': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_397/moving_variance': 'InceptionResnetV2/Repeat_2/block8_8/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_8_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_8/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_8_conv/bias': 'InceptionResnetV2/Repeat_2/block8_8/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_398/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_398/beta': 'InceptionResnetV2/Repeat_2/block8_9/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_398/moving_mean': 'InceptionResnetV2/Repeat_2/block8_9/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_398/moving_variance': 'InceptionResnetV2/Repeat_2/block8_9/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_399/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_399/beta': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_399/moving_mean': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_399/moving_variance': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_400/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_400/beta': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_400/moving_mean': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_400/moving_variance': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_401/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_401/beta': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_401/moving_mean': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_401/moving_variance': 'InceptionResnetV2/Repeat_2/block8_9/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_9_conv/kernel': 'InceptionResnetV2/Repeat_2/block8_9/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_9_conv/bias': 'InceptionResnetV2/Repeat_2/block8_9/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_402/kernel': 'InceptionResnetV2/Block8/Branch_0/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_402/beta': 'InceptionResnetV2/Block8/Branch_0/Conv2d_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_402/moving_mean': 'InceptionResnetV2/Block8/Branch_0/Conv2d_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_402/moving_variance': 'InceptionResnetV2/Block8/Branch_0/Conv2d_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_403/kernel': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0a_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_403/beta': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0a_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_403/moving_mean': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_403/moving_variance': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0a_1x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_404/kernel': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0b_1x3/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_404/beta': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0b_1x3/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_404/moving_mean': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_404/moving_variance': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0b_1x3/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/conv2d_405/kernel': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0c_3x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_405/beta': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0c_3x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_405/moving_mean': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/freezable_batch_norm_405/moving_variance': 'InceptionResnetV2/Block8/Branch_1/Conv2d_0c_3x1/BatchNorm/moving_variance',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_10_conv/kernel': 'InceptionResnetV2/Block8/Conv2d_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/block8_10_conv/bias': 'InceptionResnetV2/Block8/Conv2d_1x1/biases',
'SecondStageFeatureExtractor/InceptionResnetV2/conv_7b/kernel': 'InceptionResnetV2/Conv2d_7b_1x1/weights',
'SecondStageFeatureExtractor/InceptionResnetV2/conv_7b_bn/beta': 'InceptionResnetV2/Conv2d_7b_1x1/BatchNorm/beta',
'SecondStageFeatureExtractor/InceptionResnetV2/conv_7b_bn/moving_mean': 'InceptionResnetV2/Conv2d_7b_1x1/BatchNorm/moving_mean',
'SecondStageFeatureExtractor/InceptionResnetV2/conv_7b_bn/moving_variance': 'InceptionResnetV2/Conv2d_7b_1x1/BatchNorm/moving_variance',
}
variables_to_restore = {}
if tf.executing_eagerly():
for key in self._variable_dict:
# Keys in self._variable_dict were stored with the ':0' suffix of
# variable.name already stripped (checkpoint names carry no such
# suffix), so they can be matched directly against the mapping above.
var_name = keras_to_slim_name_mapping.get(key)
if var_name:
variables_to_restore[var_name] = self._variable_dict[key]
else:
for variable in variables_helper.get_global_variables_safely():
var_name = keras_to_slim_name_mapping.get(variable.op.name)
if var_name:
variables_to_restore[var_name] = variable
return variables_to_restore
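The mapping above pairs each Keras variable name with its slim-style counterpart in a pre-trained classification checkpoint, and the loop then converts it into a checkpoint-name-to-variable dict. Below is a minimal sketch of consuming that dict in graph mode; the `feature_extractor` instance and the checkpoint path are hypothetical stand-ins, and the method name assumes this code lives in `restore_from_classification_checkpoint_fn`, as in the Resnet fragment further down.

```python
import tensorflow.compat.v1 as tf

# Hypothetical: an already-built Faster R-CNN Keras feature extractor.
variables_to_restore = (
    feature_extractor.restore_from_classification_checkpoint_fn(
        first_stage_feature_extractor_scope='FirstStageFeatureExtractor',
        second_stage_feature_extractor_scope='SecondStageFeatureExtractor'))

# Keys are slim checkpoint names, values are the live Keras variables, so
# this Saver reads slim-named tensors straight into the Keras model.
saver = tf.train.Saver(var_list=variables_to_restore)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  saver.restore(sess, '/path/to/inception_resnet_v2.ckpt')
```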
...
@@ -73,7 +73,7 @@ class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
     proposal_classifier_features = (
         model(proposal_feature_maps))
     features_shape = tf.shape(proposal_classifier_features)
-    self.assertAllEqual(features_shape.numpy(), [2, 8, 8, 1536])
+    self.assertAllEqual(features_shape.numpy(), [2, 9, 9, 1536])
 if __name__ == '__main__':
...
...
@@ -175,23 +175,6 @@ class FasterRCNNResnetKerasFeatureExtractor(
     self._variable_dict[variable.name[:-2]] = variable
   return keras_model
-  def restore_from_classification_checkpoint_fn(
-      self,
-      first_stage_feature_extractor_scope,
-      second_stage_feature_extractor_scope):
-    """Returns a map for restoring from an (object-based) checkpoint.
-
-    Args:
-      first_stage_feature_extractor_scope: A scope name for the first stage
-        feature extractor (unused).
-      second_stage_feature_extractor_scope: A scope name for the second stage
-        feature extractor (unused).
-
-    Returns:
-      A dict mapping keys to Keras models
-    """
-    return {'feature_extractor': self.classification_backbone}
 class FasterRCNNResnet50KerasFeatureExtractor(
     FasterRCNNResnetKerasFeatureExtractor):
...
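The method removed above returned an object map rather than a variable map, which is the TF2 pattern: such maps are consumed through `tf.train.Checkpoint` instead of a `Saver`. A hedged sketch of that object-based restore, with illustrative names:

```python
import tensorflow as tf

# Illustrative: `feature_extractor` is a built Keras extractor and the
# checkpoint path is a stand-in for a real classification checkpoint.
ckpt = tf.train.Checkpoint(
    feature_extractor=feature_extractor.classification_backbone)
# expect_partial() silences warnings about detection-only variables that
# a classification checkpoint will not contain.
ckpt.restore('/path/to/classification/ckpt').expect_partial()
```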
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster RCNN Keras-based Resnet V1 FPN Feature Extractor."""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import resnet_v1
_RESNET_MODEL_OUTPUT_LAYERS = {
'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block6_out', 'conv5_block3_out'],
'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block23_out', 'conv5_block3_out'],
'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out',
'conv4_block36_out', 'conv5_block3_out'],
}
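# The layer names above are the conv-stage outputs of the Keras resnet_v1
# models at strides 4, 8, 16 and 32; these are the four backbone maps the
# FPN below consumes.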
class FasterRCNNResnetV1FpnKerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Faster RCNN Feature Extractor using Keras-based Resnet V1 FPN features."""
def __init__(self,
is_training,
resnet_v1_base_model,
resnet_v1_base_model_name,
first_stage_features_stride,
conv_hyperparams,
batch_norm_trainable=False,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
resnet_v1_base_model: base resnet v1 network to use. One of
the resnet_v1.resnet_v1_{50,101,152} models.
resnet_v1_base_model_name: model name under which to construct resnet v1.
first_stage_features_stride: See base class.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
batch_norm_trainable: See base class.
weight_decay: See base class.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet v1 layers.
fpn_max_level: the smallest resolution feature map to construct or use in
  FPN. FPN construction uses feature maps starting from fpn_min_level up
  to fpn_max_level. If the backbone network does not provide enough
  feature maps, additional maps are created by applying stride-2
  convolutions until the desired number of FPN levels is reached.
additional_layer_depth: additional feature map layer channel depth.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
super(FasterRCNNResnetV1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
self._resnet_v1_base_model = resnet_v1_base_model
self._resnet_v1_base_model_name = resnet_v1_base_model_name
self._conv_hyperparams = conv_hyperparams
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._freeze_batchnorm = (not batch_norm_trainable)
self._override_base_feature_extractor_hyperparams = (
    override_base_feature_extractor_hyperparams)
self._resnet_block_names = ['block1', 'block2', 'block3', 'block4']
self.classification_backbone = None
self._fpn_features_generator = None
self._coarse_feature_layers = []
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
Extracts features using the Resnet v1 FPN network.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes preprocessed_inputs:
A [batch, height, width, channels] float32 tensor
representing a batch of images.
And returns rpn_feature_map:
A list of tensors with shape [batch, height, width, depth]
"""
with tf.name_scope(name):
with tf.name_scope('ResnetV1FPN'):
full_resnet_v1_model = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
classes=None,
weights=None,
include_top=False)
output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name]
outputs = [full_resnet_v1_model.get_layer(output_layer_name).output
for output_layer_name in output_layers]
self.classification_backbone = tf.keras.Model(
inputs=full_resnet_v1_model.inputs,
outputs=outputs)
backbone_outputs = self.classification_backbone(full_resnet_v1_model.inputs)
# construct FPN feature generator
self._base_fpn_max_level = min(self._fpn_max_level, 5)
self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
self._fpn_features_generator = (
feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=self._num_levels,
depth=self._additional_layer_depth,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
feature_block_list = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
feature_block_map = dict(
list(zip(self._resnet_block_names, backbone_outputs)))
fpn_input_image_features = [
(feature_block, feature_block_map[feature_block])
for feature_block in feature_block_list]
fpn_features = self._fpn_features_generator(fpn_input_image_features)
# Construct coarse feature layers
for i in range(self._base_fpn_max_level, self._fpn_max_level):
layers = []
layer_name = 'bottom_up_block{}'.format(i)
layers.append(
tf.keras.layers.Conv2D(
self._additional_layer_depth,
[3, 3],
padding='SAME',
strides=2,
name=layer_name + '_conv',
**self._conv_hyperparams.params()))
layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
self._conv_hyperparams.build_activation_layer(
name=layer_name))
self._coarse_feature_layers.append(layers)
feature_maps = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_block{}'.format(level-1)])
last_feature_map = fpn_features['top_down_block{}'.format(
self._base_fpn_max_level - 1)]
for coarse_feature_layers in self._coarse_feature_layers:
for layer in coarse_feature_layers:
last_feature_map = layer(last_feature_map)
feature_maps.append(last_feature_map)
feature_extractor_model = tf.keras.models.Model(
inputs=full_resnet_v1_model.inputs, outputs=feature_maps)
return feature_extractor_model
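# Worked example of the level arithmetic above, with the defaults
# fpn_min_level=2, fpn_max_level=6 used by the tests below:
# _base_fpn_max_level = min(6, 5) = 5, so the top-down pass emits levels
# 2..5 (strides 4, 8, 16, 32) and a single stride-2 'bottom_up_block5'
# conv adds level 6. For a 448x448 input the five maps are therefore
# 112, 56, 28, 14 and 7 pixels on a side.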
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
Construct two fully connected layer to extract the box classifier features.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes proposal_feature_maps:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
And returns proposal_classifier_features:
A 2-D float tensor with shape
[batch_size * self.max_num_proposals, 1024]
representing box classifier features for each proposal.
"""
with tf.name_scope(name):
with tf.name_scope('ResnetV1FPN'):
# TODO: Add a batchnorm layer between two fc layers.
feature_extractor_model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=1024, activation='relu'),
tf.keras.layers.Dense(units=1024, activation='relu')
])
return feature_extractor_model
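# Shape note: Flatten() collapses each [crop_height, crop_width, depth]
# proposal crop, so the Dense layers yield a 2-D output of shape
# [batch_size * max_num_proposals, 1024], matching the [3, 1024]
# assertion in the tests below.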
class FasterRCNNResnet50FpnKerasFeatureExtractor(
FasterRCNNResnetV1FpnKerasFeatureExtractor):
"""Faster RCNN with Resnet50 FPN feature extractor."""
def __init__(self,
is_training,
first_stage_features_stride=16,
conv_hyperparams=None,
batch_norm_trainable=False,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
conv_hyperparams: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
fpn_min_level: See base class.
fpn_max_level: See base class.
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet50FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparams,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
resnet_v1_base_model_name='resnet_v1_50',
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay,
fpn_min_level=fpn_min_level,
fpn_max_level=fpn_max_level,
additional_layer_depth=additional_layer_depth,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams)
class FasterRCNNResnet101FpnKerasFeatureExtractor(
FasterRCNNResnetV1FpnKerasFeatureExtractor):
"""Faster RCNN with Resnet101 FPN feature extractor."""
def __init__(self,
is_training,
first_stage_features_stride=16,
conv_hyperparams=None,
batch_norm_trainable=False,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
conv_hyperparams: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
fpn_min_level: See base class.
fpn_max_level: See base class.
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet101FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparams,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
resnet_v1_base_model_name='resnet_v1_101',
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay,
fpn_min_level=fpn_min_level,
fpn_max_level=fpn_max_level,
additional_layer_depth=additional_layer_depth,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams)
class FasterRCNNResnet152FpnKerasFeatureExtractor(
FasterRCNNResnetV1FpnKerasFeatureExtractor):
"""Faster RCNN with Resnet152 FPN feature extractor."""
def __init__(self,
is_training,
first_stage_features_stride=16,
conv_hyperparams=None,
batch_norm_trainable=False,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
conv_hyperparams: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
fpn_min_level: See base class.
fpn_max_level: See base class.
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet152FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparams,
resnet_v1_base_model=resnet_v1.resnet_v1_152,
resnet_v1_base_model_name='resnet_v1_152',
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay,
fpn_min_level=fpn_min_level,
fpn_max_level=fpn_max_level,
additional_layer_depth=additional_layer_depth,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams)
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_resnet_v1_fpn_keras_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_res_fpn
from object_detection.utils import tf_version
from object_detection.protos import hyperparams_pb2
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _build_feature_extractor(self):
return frcnn_res_fpn.FasterRCNNResnet50FpnKerasFeatureExtractor(
is_training=False,
conv_hyperparams=self._build_conv_hyperparams(),
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[2, 448, 448, 3], maxval=255, dtype=tf.float32)
rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shapes = [tf.shape(rpn_feature_map)
for rpn_feature_map in rpn_feature_maps]
self.assertAllEqual(features_shapes[0].numpy(), [2, 112, 112, 256])
self.assertAllEqual(features_shapes[1].numpy(), [2, 56, 56, 256])
self.assertAllEqual(features_shapes[2].numpy(), [2, 28, 28, 256])
self.assertAllEqual(features_shapes[3].numpy(), [2, 14, 14, 256])
self.assertAllEqual(features_shapes[4].numpy(), [2, 7, 7, 256])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[2, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shapes = [tf.shape(rpn_feature_map)
for rpn_feature_map in rpn_feature_maps]
self.assertAllEqual(features_shapes[0].numpy(), [2, 56, 56, 256])
self.assertAllEqual(features_shapes[1].numpy(), [2, 28, 28, 256])
self.assertAllEqual(features_shapes[2].numpy(), [2, 14, 14, 256])
self.assertAllEqual(features_shapes[3].numpy(), [2, 7, 7, 256])
self.assertAllEqual(features_shapes[4].numpy(), [2, 4, 4, 256])
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
proposal_feature_maps = tf.random_uniform(
[3, 7, 7, 1024], maxval=255, dtype=tf.float32)
model = feature_extractor.get_box_classifier_feature_extractor_model(
name='TestScope')
proposal_classifier_features = (
model(proposal_feature_maps))
features_shape = tf.shape(proposal_classifier_features)
self.assertAllEqual(features_shape.numpy(), [3, 1024])


if __name__ == '__main__':
  tf.test.main()