Commit 3ce2f61b authored by Kaushik Shivakumar

Merge branch 'master' of https://github.com/tensorflow/models into context_tf2

parents bb16d5ca 8e9296ff
...
@@ -39,6 +39,9 @@ class ModelBuilderTest(test_case.TestCase, parameterized.TestCase):
def ssd_feature_extractors(self):
raise NotImplementedError
+ def get_override_base_feature_extractor_hyperparams(self, extractor_type):
+   raise NotImplementedError
def faster_rcnn_feature_extractors(self):
raise NotImplementedError
...
@@ -70,7 +73,6 @@ class ModelBuilderTest(test_case.TestCase, parameterized.TestCase):
}
}
}
- override_base_feature_extractor_hyperparams: true
}
box_coder {
faster_rcnn_box_coder {
...
@@ -205,6 +207,8 @@ class ModelBuilderTest(test_case.TestCase, parameterized.TestCase):
for extractor_type, extractor_class in self.ssd_feature_extractors().items(
):
model_proto.ssd.feature_extractor.type = extractor_type
+ model_proto.ssd.feature_extractor.override_base_feature_extractor_hyperparams = (
+     self.get_override_base_feature_extractor_hyperparams(extractor_type))
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor, extractor_class)
...
@@ -38,6 +38,9 @@ class ModelBuilderTF1Test(model_builder_test.ModelBuilderTest):
def ssd_feature_extractors(self):
return model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP
+ def get_override_base_feature_extractor_hyperparams(self, extractor_type):
+   return extractor_type in {'ssd_inception_v2', 'ssd_inception_v3'}
def faster_rcnn_feature_extractors(self):
return model_builder.FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP
...
@@ -42,6 +42,9 @@ class ModelBuilderTF2Test(model_builder_test.ModelBuilderTest):
def ssd_feature_extractors(self):
return model_builder.SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
+ def get_override_base_feature_extractor_hyperparams(self, extractor_type):
+   return extractor_type in {}
def faster_rcnn_feature_extractors(self):
return model_builder.FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
...
@@ -161,6 +164,28 @@ class ModelBuilderTF2Test(model_builder_test.ModelBuilderTest):
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.MaskEstimation())
+ def get_fake_densepose_proto(self):
+   proto_txt = """
+     task_loss_weight: 0.5
+     class_id: 0
+     loss {
+       classification_loss {
+         weighted_softmax {}
+       }
+       localization_loss {
+         l1_localization_loss {
+         }
+       }
+     }
+     num_parts: 24
+     part_loss_weight: 1.0
+     coordinate_loss_weight: 2.0
+     upsample_to_input_res: true
+     heatmap_bias_init: -2.0
+   """
+   return text_format.Merge(proto_txt,
+                            center_net_pb2.CenterNet.DensePoseEstimation())
def test_create_center_net_model(self):
"""Test building a CenterNet model from proto txt."""
proto_txt = """
...
@@ -192,6 +217,8 @@ class ModelBuilderTF2Test(model_builder_test.ModelBuilderTest):
self.get_fake_label_map_file_path())
config.center_net.mask_estimation_task.CopyFrom(
self.get_fake_mask_proto())
+ config.center_net.densepose_estimation_task.CopyFrom(
+     self.get_fake_densepose_proto())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
...
@@ -248,6 +275,21 @@ class ModelBuilderTF2Test(model_builder_test.ModelBuilderTest):
self.assertAlmostEqual(
model._mask_params.heatmap_bias_init, -2.0, places=4)
+ # Check DensePose related parameters.
+ self.assertEqual(model._densepose_params.class_id, 0)
+ self.assertIsInstance(model._densepose_params.classification_loss,
+                       losses.WeightedSoftmaxClassificationLoss)
+ self.assertIsInstance(model._densepose_params.localization_loss,
+                       losses.L1LocalizationLoss)
+ self.assertAlmostEqual(model._densepose_params.part_loss_weight, 1.0)
+ self.assertAlmostEqual(model._densepose_params.coordinate_loss_weight, 2.0)
+ self.assertEqual(model._densepose_params.num_parts, 24)
+ self.assertAlmostEqual(model._densepose_params.task_loss_weight, 0.5)
+ self.assertTrue(model._densepose_params.upsample_to_input_res)
+ self.assertEqual(model._densepose_params.upsample_method, 'bilinear')
+ self.assertAlmostEqual(
+     model._densepose_params.heatmap_bias_init, -2.0, places=4)
# Check feature extractor parameters.
self.assertIsInstance(
model._feature_extractor,
...
@@ -417,4 +417,12 @@ def build(preprocessor_step_config):
'num_scales': config.num_scales
}
+ if step_type == 'random_scale_crop_and_pad_to_square':
+   config = preprocessor_step_config.random_scale_crop_and_pad_to_square
+   return preprocessor.random_scale_crop_and_pad_to_square, {
+       'scale_min': config.scale_min,
+       'scale_max': config.scale_max,
+       'output_size': config.output_size,
+   }
raise ValueError('Unknown preprocessing step.')
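For context, the new builder branch above can be exercised end to end. A minimal sketch (the numeric values are hypothetical; it assumes the preprocessor.proto message exposes the same field names the builder reads):

# Illustrative only: build the new preprocessing step from a text-format proto.
from google.protobuf import text_format
from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2

step_config = text_format.Merge(
    """
    random_scale_crop_and_pad_to_square {
      scale_min: 0.6  # hypothetical values
      scale_max: 1.3
      output_size: 512
    }
    """, preprocessor_pb2.PreprocessingStep())

# build() returns the preprocessing function together with its kwargs.
function, kwargs = preprocessor_builder.build(step_config)
assert function is preprocessor.random_scale_crop_and_pad_to_square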
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "rOvvWAVTkMR7"
},
"source": [
"# Eager Few Shot Object Detection Colab\n",
"\n",
"Welcome to the Eager Few Shot Object Detection Colab --- in this colab we demonstrate fine tuning of a (TF2 friendly) RetinaNet architecture on very few examples of a novel class after initializing from a pre-trained COCO checkpoint.\n",
"Training runs in eager mode.\n",
"\n",
"Estimated time to run through this colab (with GPU): \u003c 5 minutes."
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "vPs64QA1Zdov"
},
"source": [
"## Imports"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "LBZ9VWZZFUCT"
},
"outputs": [],
"source": [
"!pip install -U --pre tensorflow==\"2.2.0\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "oi28cqGGFWnY"
},
"outputs": [],
"source": [
"import os\n",
"import pathlib\n",
"\n",
"# Clone the tensorflow models repository if it doesn't already exist\n",
"if \"models\" in pathlib.Path.cwd().parts:\n",
" while \"models\" in pathlib.Path.cwd().parts:\n",
" os.chdir('..')\n",
"elif not pathlib.Path('models').exists():\n",
" !git clone --depth 1 https://github.com/tensorflow/models"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "NwdsBdGhFanc"
},
"outputs": [],
"source": [
"# Install the Object Detection API\n",
"%%bash\n",
"cd models/research/\n",
"protoc object_detection/protos/*.proto --python_out=.\n",
"cp object_detection/packages/tf2/setup.py .\n",
"python -m pip install ."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "uZcqD4NLdnf4"
},
"outputs": [],
"source": [
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import os\n",
"import random\n",
"import io\n",
"import imageio\n",
"import glob\n",
"import scipy.misc\n",
"import numpy as np\n",
"from six import BytesIO\n",
"from PIL import Image, ImageDraw, ImageFont\n",
"from IPython.display import display, Javascript\n",
"from IPython.display import Image as IPyImage\n",
"\n",
"import tensorflow as tf\n",
"\n",
"from object_detection.utils import label_map_util\n",
"from object_detection.utils import config_util\n",
"from object_detection.utils import visualization_utils as viz_utils\n",
"from object_detection.utils import colab_utils\n",
"from object_detection.builders import model_builder\n",
"\n",
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "IogyryF2lFBL"
},
"source": [
"# Utilities"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "-y9R0Xllefec"
},
"outputs": [],
"source": [
"def load_image_into_numpy_array(path):\n",
" \"\"\"Load an image from file into a numpy array.\n",
"\n",
" Puts image into numpy array to feed into tensorflow graph.\n",
" Note that by convention we put it into a numpy array with shape\n",
" (height, width, channels), where channels=3 for RGB.\n",
"\n",
" Args:\n",
" path: a file path.\n",
"\n",
" Returns:\n",
" uint8 numpy array with shape (img_height, img_width, 3)\n",
" \"\"\"\n",
" img_data = tf.io.gfile.GFile(path, 'rb').read()\n",
" image = Image.open(BytesIO(img_data))\n",
" (im_width, im_height) = image.size\n",
" return np.array(image.getdata()).reshape(\n",
" (im_height, im_width, 3)).astype(np.uint8)\n",
"\n",
"def plot_detections(image_np,\n",
" boxes,\n",
" classes,\n",
" scores,\n",
" category_index,\n",
" figsize=(12, 16),\n",
" image_name=None):\n",
" \"\"\"Wrapper function to visualize detections.\n",
"\n",
" Args:\n",
" image_np: uint8 numpy array with shape (img_height, img_width, 3)\n",
" boxes: a numpy array of shape [N, 4]\n",
" classes: a numpy array of shape [N]. Note that class indices are 1-based,\n",
" and match the keys in the label map.\n",
" scores: a numpy array of shape [N] or None. If scores=None, then\n",
" this function assumes that the boxes to be plotted are groundtruth\n",
" boxes and plot all boxes as black with no classes or scores.\n",
" category_index: a dict containing category dictionaries (each holding\n",
" category index `id` and category name `name`) keyed by category indices.\n",
" figsize: size for the figure.\n",
" image_name: a name for the image file.\n",
" \"\"\"\n",
" image_np_with_annotations = image_np.copy()\n",
" viz_utils.visualize_boxes_and_labels_on_image_array(\n",
" image_np_with_annotations,\n",
" boxes,\n",
" classes,\n",
" scores,\n",
" category_index,\n",
" use_normalized_coordinates=True,\n",
" min_score_thresh=0.8)\n",
" if image_name:\n",
" plt.imsave(image_name, image_np_with_annotations)\n",
" else:\n",
" plt.imshow(image_np_with_annotations)\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "sSaXL28TZfk1"
},
"source": [
"# Rubber Ducky data\n",
"\n",
"We will start with some toy (literally) data consisting of 5 images of a rubber\n",
"ducky. Note that the [coco](https://cocodataset.org/#explore) dataset contains a number of animals, but notably, it does *not* contain rubber duckies (or even ducks for that matter), so this is a novel class."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "SQy3ND7EpFQM"
},
"outputs": [],
"source": [
"# Load images and visualize\n",
"train_image_dir = 'models/research/object_detection/test_images/ducky/train/'\n",
"train_images_np = []\n",
"for i in range(1, 6):\n",
" image_path = os.path.join(train_image_dir, 'robertducky' + str(i) + '.jpg')\n",
" train_images_np.append(load_image_into_numpy_array(image_path))\n",
"\n",
"plt.rcParams['axes.grid'] = False\n",
"plt.rcParams['xtick.labelsize'] = False\n",
"plt.rcParams['ytick.labelsize'] = False\n",
"plt.rcParams['xtick.top'] = False\n",
"plt.rcParams['xtick.bottom'] = False\n",
"plt.rcParams['ytick.left'] = False\n",
"plt.rcParams['ytick.right'] = False\n",
"plt.rcParams['figure.figsize'] = [14, 7]\n",
"\n",
"for idx, train_image_np in enumerate(train_images_np):\n",
" plt.subplot(2, 3, idx+1)\n",
" plt.imshow(train_image_np)\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "cbKXmQoxcUgE"
},
"source": [
"# Annotate images with bounding boxes\n",
"\n",
"In this cell you will annotate the rubber duckies --- draw a box around the rubber ducky in each image; click `next image` to go to the next image and `submit` when there are no more images.\n",
"\n",
"If you'd like to skip the manual annotation step, we totally understand. In this case, simply skip this cell and run the next cell instead, where we've prepopulated the groundtruth with pre-annotated bounding boxes.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "-nEDRoUEcUgL"
},
"outputs": [],
"source": [
"gt_boxes = []\n",
"colab_utils.annotate(train_images_np, box_storage_pointer=gt_boxes)"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "wTP9AFqecUgS"
},
"source": [
"# In case you didn't want to label...\n",
"\n",
"Run this cell only if you didn't annotate anything above and\n",
"would prefer to just use our preannotated boxes. Don't forget\n",
"to uncomment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "wIAT6ZUmdHOC"
},
"outputs": [],
"source": [
"# gt_boxes = [\n",
"# np.array([[0.436, 0.591, 0.629, 0.712]], dtype=np.float32),\n",
"# np.array([[0.539, 0.583, 0.73, 0.71]], dtype=np.float32),\n",
"# np.array([[0.464, 0.414, 0.626, 0.548]], dtype=np.float32),\n",
"# np.array([[0.313, 0.308, 0.648, 0.526]], dtype=np.float32),\n",
"# np.array([[0.256, 0.444, 0.484, 0.629]], dtype=np.float32)\n",
"# ]"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "Dqb_yjAo3cO_"
},
"source": [
"# Prepare data for training\n",
"\n",
"Below we add the class annotations (for simplicity, we assume a single class in this colab; though it should be straightforward to extend this to handle multiple classes). We also convert everything to the format that the training\n",
"loop below expects (e.g., everything converted to tensors, classes converted to one-hot representations, etc.)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "HWBqFVMcweF-"
},
"outputs": [],
"source": [
"\n",
"# By convention, our non-background classes start counting at 1. Given\n",
"# that we will be predicting just one class, we will therefore assign it a\n",
"# `class id` of 1.\n",
"duck_class_id = 1\n",
"num_classes = 1\n",
"\n",
"category_index = {duck_class_id: {'id': duck_class_id, 'name': 'rubber_ducky'}}\n",
"\n",
"# Convert class labels to one-hot; convert everything to tensors.\n",
"# The `label_id_offset` here shifts all classes by a certain number of indices;\n",
"# we do this here so that the model receives one-hot labels where non-background\n",
"# classes start counting at the zeroth index. This is ordinarily just handled\n",
"# automatically in our training binaries, but we need to reproduce it here.\n",
"label_id_offset = 1\n",
"train_image_tensors = []\n",
"gt_classes_one_hot_tensors = []\n",
"gt_box_tensors = []\n",
"for (train_image_np, gt_box_np) in zip(\n",
" train_images_np, gt_boxes):\n",
" train_image_tensors.append(tf.expand_dims(tf.convert_to_tensor(\n",
" train_image_np, dtype=tf.float32), axis=0))\n",
" gt_box_tensors.append(tf.convert_to_tensor(gt_box_np, dtype=tf.float32))\n",
" zero_indexed_groundtruth_classes = tf.convert_to_tensor(\n",
" np.ones(shape=[gt_box_np.shape[0]], dtype=np.int32) - label_id_offset)\n",
" gt_classes_one_hot_tensors.append(tf.one_hot(\n",
" zero_indexed_groundtruth_classes, num_classes))\n",
"print('Done prepping data.')\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "b3_Z3mJWN9KJ"
},
"source": [
"# Let's just visualize the rubber duckies as a sanity check\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "YBD6l-E4N71y"
},
"outputs": [],
"source": [
"dummy_scores = np.array([1.0], dtype=np.float32) # give boxes a score of 100%\n",
"\n",
"plt.figure(figsize=(30, 15))\n",
"for idx in range(5):\n",
" plt.subplot(2, 3, idx+1)\n",
" plot_detections(\n",
" train_images_np[idx],\n",
" gt_boxes[idx],\n",
" np.ones(shape=[gt_boxes[idx].shape[0]], dtype=np.int32),\n",
" dummy_scores, category_index)\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "ghDAsqfoZvPh"
},
"source": [
"# Create model and restore weights for all but last layer\n",
"\n",
"In this cell we build a single stage detection architecture (RetinaNet) and restore all but the classification layer at the top (which will be automatically randomly initialized).\n",
"\n",
"For simplicity, we have hardcoded a number of things in this colab for the specific RetinaNet architecture at hand (including assuming that the image size will always be 640x640), however it is not difficult to generalize to other model configurations."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "9J16r3NChD-7"
},
"outputs": [],
"source": [
"# Download the checkpoint and put it into models/research/object_detection/test_data/\n",
"\n",
"!wget http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz\n",
"!tar -xf ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz\n",
"!mv ssd_resnet50_v1_fpn_640x640_coco17_tpu-8/checkpoint models/research/object_detection/test_data/"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "RyT4BUbaMeG-"
},
"outputs": [],
"source": [
"tf.keras.backend.clear_session()\n",
"\n",
"print('Building model and restoring weights for fine-tuning...', flush=True)\n",
"num_classes = 1\n",
"pipeline_config = 'models/research/object_detection/configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config'\n",
"checkpoint_path = 'models/research/object_detection/test_data/checkpoint/ckpt-0'\n",
"\n",
"# Load pipeline config and build a detection model.\n",
"#\n",
"# Since we are working off of a COCO architecture which predicts 90\n",
"# class slots by default, we override the `num_classes` field here to be just\n",
"# one (for our new rubber ducky class).\n",
"configs = config_util.get_configs_from_pipeline_file(pipeline_config)\n",
"model_config = configs['model']\n",
"model_config.ssd.num_classes = num_classes\n",
"model_config.ssd.freeze_batchnorm = True\n",
"detection_model = model_builder.build(\n",
" model_config=model_config, is_training=True)\n",
"\n",
"# Set up object-based checkpoint restore --- RetinaNet has two prediction\n",
"# `heads` --- one for classification, the other for box regression. We will\n",
"# restore the box regression head but initialize the classification head\n",
"# from scratch (we show the omission below by commenting out the line that\n",
"# we would add if we wanted to restore both heads)\n",
"fake_box_predictor = tf.compat.v2.train.Checkpoint(\n",
" _base_tower_layers_for_heads=detection_model._box_predictor._base_tower_layers_for_heads,\n",
" # _prediction_heads=detection_model._box_predictor._prediction_heads,\n",
" # (i.e., the classification head that we *will not* restore)\n",
" _box_prediction_head=detection_model._box_predictor._box_prediction_head,\n",
" )\n",
"fake_model = tf.compat.v2.train.Checkpoint(\n",
" _feature_extractor=detection_model._feature_extractor,\n",
" _box_predictor=fake_box_predictor)\n",
"ckpt = tf.compat.v2.train.Checkpoint(model=fake_model)\n",
"ckpt.restore(checkpoint_path).expect_partial()\n",
"\n",
"# Run model through a dummy image so that variables are created\n",
"image, shapes = detection_model.preprocess(tf.zeros([1, 640, 640, 3]))\n",
"prediction_dict = detection_model.predict(image, shapes)\n",
"_ = detection_model.postprocess(prediction_dict, shapes)\n",
"print('Weights restored!')"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "pCkWmdoZZ0zJ"
},
"source": [
"# Eager mode custom training loop\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "nyHoF4mUrv5-"
},
"outputs": [],
"source": [
"tf.keras.backend.set_learning_phase(True)\n",
"\n",
"# These parameters can be tuned; since our training set has 5 images\n",
"# it doesn't make sense to have a much larger batch size, though we could\n",
"# fit more examples in memory if we wanted to.\n",
"batch_size = 4\n",
"learning_rate = 0.01\n",
"num_batches = 100\n",
"\n",
"# Select variables in top layers to fine-tune.\n",
"trainable_variables = detection_model.trainable_variables\n",
"to_fine_tune = []\n",
"prefixes_to_train = [\n",
" 'WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead',\n",
" 'WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead']\n",
"for var in trainable_variables:\n",
" if any([var.name.startswith(prefix) for prefix in prefixes_to_train]):\n",
" to_fine_tune.append(var)\n",
"\n",
"# Set up forward + backward pass for a single train step.\n",
"def get_model_train_step_function(model, optimizer, vars_to_fine_tune):\n",
" \"\"\"Get a tf.function for training step.\"\"\"\n",
"\n",
" # Use tf.function for a bit of speed.\n",
" # Comment out the tf.function decorator if you want the inside of the\n",
" # function to run eagerly.\n",
" @tf.function\n",
" def train_step_fn(image_tensors,\n",
" groundtruth_boxes_list,\n",
" groundtruth_classes_list):\n",
" \"\"\"A single training iteration.\n",
"\n",
" Args:\n",
" image_tensors: A list of [1, height, width, 3] Tensor of type tf.float32.\n",
" Note that the height and width can vary across images, as they are\n",
" reshaped within this function to be 640x640.\n",
" groundtruth_boxes_list: A list of Tensors of shape [N_i, 4] with type\n",
" tf.float32 representing groundtruth boxes for each image in the batch.\n",
" groundtruth_classes_list: A list of Tensors of shape [N_i, num_classes]\n",
" with type tf.float32 representing groundtruth boxes for each image in\n",
" the batch.\n",
"\n",
" Returns:\n",
" A scalar tensor representing the total loss for the input batch.\n",
" \"\"\"\n",
" shapes = tf.constant(batch_size * [[640, 640, 3]], dtype=tf.int32)\n",
" model.provide_groundtruth(\n",
" groundtruth_boxes_list=groundtruth_boxes_list,\n",
" groundtruth_classes_list=groundtruth_classes_list)\n",
" with tf.GradientTape() as tape:\n",
" preprocessed_images = tf.concat(\n",
" [detection_model.preprocess(image_tensor)[0]\n",
" for image_tensor in image_tensors], axis=0)\n",
" prediction_dict = model.predict(preprocessed_images, shapes)\n",
" losses_dict = model.loss(prediction_dict, shapes)\n",
" total_loss = losses_dict['Loss/localization_loss'] + losses_dict['Loss/classification_loss']\n",
" gradients = tape.gradient(total_loss, vars_to_fine_tune)\n",
" optimizer.apply_gradients(zip(gradients, vars_to_fine_tune))\n",
" return total_loss\n",
"\n",
" return train_step_fn\n",
"\n",
"optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)\n",
"train_step_fn = get_model_train_step_function(\n",
" detection_model, optimizer, to_fine_tune)\n",
"\n",
"print('Start fine-tuning!', flush=True)\n",
"for idx in range(num_batches):\n",
" # Grab keys for a random subset of examples\n",
" all_keys = list(range(len(train_images_np)))\n",
" random.shuffle(all_keys)\n",
" example_keys = all_keys[:batch_size]\n",
"\n",
" # Note that we do not do data augmentation in this demo. If you want a\n",
" # a fun exercise, we recommend experimenting with random horizontal flipping\n",
" # and random cropping :)\n",
" gt_boxes_list = [gt_box_tensors[key] for key in example_keys]\n",
" gt_classes_list = [gt_classes_one_hot_tensors[key] for key in example_keys]\n",
" image_tensors = [train_image_tensors[key] for key in example_keys]\n",
"\n",
" # Training step (forward pass + backwards pass)\n",
" total_loss = train_step_fn(image_tensors, gt_boxes_list, gt_classes_list)\n",
"\n",
" if idx % 10 == 0:\n",
" print('batch ' + str(idx) + ' of ' + str(num_batches)\n",
" + ', loss=' + str(total_loss.numpy()), flush=True)\n",
"\n",
"print('Done fine-tuning!')"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "WHlXL1x_Z3tc"
},
"source": [
"# Load test images and run inference with new model!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "WcE6OwrHQJya"
},
"outputs": [],
"source": [
"test_image_dir = 'models/research/object_detection/test_images/ducky/test/'\n",
"test_images_np = []\n",
"for i in range(1, 50):\n",
" image_path = os.path.join(test_image_dir, 'out' + str(i) + '.jpg')\n",
" test_images_np.append(np.expand_dims(\n",
" load_image_into_numpy_array(image_path), axis=0))\n",
"\n",
"# Again, uncomment this decorator if you want to run inference eagerly\n",
"@tf.function\n",
"def detect(input_tensor):\n",
" \"\"\"Run detection on an input image.\n",
"\n",
" Args:\n",
" input_tensor: A [1, height, width, 3] Tensor of type tf.float32.\n",
" Note that height and width can be anything since the image will be\n",
" immediately resized according to the needs of the model within this\n",
" function.\n",
"\n",
" Returns:\n",
" A dict containing 3 Tensors (`detection_boxes`, `detection_classes`,\n",
" and `detection_scores`).\n",
" \"\"\"\n",
" preprocessed_image, shapes = detection_model.preprocess(input_tensor)\n",
" prediction_dict = detection_model.predict(preprocessed_image, shapes)\n",
" return detection_model.postprocess(prediction_dict, shapes)\n",
"\n",
"# Note that the first frame will trigger tracing of the tf.function, which will\n",
"# take some time, after which inference should be fast.\n",
"\n",
"label_id_offset = 1\n",
"for i in range(len(test_images_np)):\n",
" input_tensor = tf.convert_to_tensor(test_images_np[i], dtype=tf.float32)\n",
" detections = detect(input_tensor)\n",
"\n",
" plot_detections(\n",
" test_images_np[i][0],\n",
" detections['detection_boxes'][0].numpy(),\n",
" detections['detection_classes'][0].numpy().astype(np.uint32)\n",
" + label_id_offset,\n",
" detections['detection_scores'][0].numpy(),\n",
" category_index, figsize=(15, 20), image_name=\"gif_frame_\" + ('%02d' % i) + \".jpg\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "RW1FrT2iNnpy"
},
"outputs": [],
"source": [
"imageio.plugins.freeimage.download()\n",
"\n",
"anim_file = 'duckies_test.gif'\n",
"\n",
"filenames = glob.glob('gif_frame_*.jpg')\n",
"filenames = sorted(filenames)\n",
"last = -1\n",
"images = []\n",
"for filename in filenames:\n",
" image = imageio.imread(filename)\n",
" images.append(image)\n",
"\n",
"imageio.mimsave(anim_file, images, 'GIF-FI', fps=5)\n",
"\n",
"display(IPyImage(open(anim_file, 'rb').read()))"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"collapsed_sections": [],
"name": "interactive_eager_few_shot_od_training_colab.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
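The pivotal step in the colab above is the object-based partial checkpoint restore: the box head is restored while the classification head keeps its random initialization. A minimal standalone sketch of the same tf.train.Checkpoint pattern (the layer names here are illustrative, not from the Object Detection API):

import tensorflow as tf

# A "pretrained" model with a backbone and two heads.
backbone = tf.keras.layers.Dense(8)
box_head = tf.keras.layers.Dense(4)
class_head = tf.keras.layers.Dense(90)
x = tf.zeros([1, 16])
_ = box_head(backbone(x)), class_head(backbone(x))  # create the variables
ckpt_path = tf.train.Checkpoint(
    backbone=backbone, box_head=box_head, class_head=class_head
).save('/tmp/demo_ckpt')

# A fresh model: by listing only the backbone and box head in the Checkpoint,
# the class head's variables stay randomly initialized. expect_partial()
# silences warnings about the checkpoint values we deliberately skip.
new_backbone = tf.keras.layers.Dense(8)
new_box_head = tf.keras.layers.Dense(4)
_ = new_box_head(new_backbone(x))  # create the variables before restoring
tf.train.Checkpoint(
    backbone=new_backbone, box_head=new_box_head
).restore(ckpt_path).expect_partial()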
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "rOvvWAVTkMR7"
},
"source": [
"# Intro to Object Detection Colab\n",
"\n",
"Welcome to the object detection colab! This demo will take you through the steps of running an \"out-of-the-box\" detection model on a collection of images."
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "vPs64QA1Zdov"
},
"source": [
"## Imports and Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "LBZ9VWZZFUCT"
},
"outputs": [],
"source": [
"!pip install -U --pre tensorflow==\"2.2.0\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "oi28cqGGFWnY"
},
"outputs": [],
"source": [
"import os\n",
"import pathlib\n",
"\n",
"# Clone the tensorflow models repository if it doesn't already exist\n",
"if \"models\" in pathlib.Path.cwd().parts:\n",
" while \"models\" in pathlib.Path.cwd().parts:\n",
" os.chdir('..')\n",
"elif not pathlib.Path('models').exists():\n",
" !git clone --depth 1 https://github.com/tensorflow/models"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "NwdsBdGhFanc"
},
"outputs": [],
"source": [
"# Install the Object Detection API\n",
"%%bash\n",
"cd models/research/\n",
"protoc object_detection/protos/*.proto --python_out=.\n",
"cp object_detection/packages/tf2/setup.py .\n",
"python -m pip install ."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "yn5_uV1HLvaz"
},
"outputs": [],
"source": [
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import io\n",
"import scipy.misc\n",
"import numpy as np\n",
"from six import BytesIO\n",
"from PIL import Image, ImageDraw, ImageFont\n",
"\n",
"import tensorflow as tf\n",
"\n",
"from object_detection.utils import label_map_util\n",
"from object_detection.utils import config_util\n",
"from object_detection.utils import visualization_utils as viz_utils\n",
"from object_detection.builders import model_builder\n",
"\n",
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "IogyryF2lFBL"
},
"source": [
"## Utilities"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "-y9R0Xllefec"
},
"outputs": [],
"source": [
"def load_image_into_numpy_array(path):\n",
" \"\"\"Load an image from file into a numpy array.\n",
"\n",
" Puts image into numpy array to feed into tensorflow graph.\n",
" Note that by convention we put it into a numpy array with shape\n",
" (height, width, channels), where channels=3 for RGB.\n",
"\n",
" Args:\n",
" path: the file path to the image\n",
"\n",
" Returns:\n",
" uint8 numpy array with shape (img_height, img_width, 3)\n",
" \"\"\"\n",
" img_data = tf.io.gfile.GFile(path, 'rb').read()\n",
" image = Image.open(BytesIO(img_data))\n",
" (im_width, im_height) = image.size\n",
" return np.array(image.getdata()).reshape(\n",
" (im_height, im_width, 3)).astype(np.uint8)\n",
"\n",
"def get_keypoint_tuples(eval_config):\n",
" \"\"\"Return a tuple list of keypoint edges from the eval config.\n",
" \n",
" Args:\n",
" eval_config: an eval config containing the keypoint edges\n",
" \n",
" Returns:\n",
" a list of edge tuples, each in the format (start, end)\n",
" \"\"\"\n",
" tuple_list = []\n",
" kp_list = eval_config.keypoint_edge\n",
" for edge in kp_list:\n",
" tuple_list.append((edge.start, edge.end))\n",
" return tuple_list"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "R4YjnOjME1gy"
},
"outputs": [],
"source": [
"# @title Choose the model to use, then evaluate the cell.\n",
"MODELS = {'centernet_with_keypoints': 'centernet_hg104_512x512_kpts_coco17_tpu-32', 'centernet_without_keypoints': 'centernet_hg104_512x512_coco17_tpu-8'}\n",
"\n",
"model_display_name = 'centernet_with_keypoints' # @param ['centernet_with_keypoints', 'centernet_without_keypoints']\n",
"model_name = MODELS[model_display_name]"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "6917xnUSlp9x"
},
"source": [
"### Build a detection model and load pre-trained model weights\n",
"\n",
"This sometimes takes a little while, please be patient!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "ctPavqlyPuU_"
},
"outputs": [],
"source": [
"# Download the checkpoint and put it into models/research/object_detection/test_data/\n",
"\n",
"if model_display_name == 'centernet_with_keypoints':\n",
" !wget http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_hg104_512x512_kpts_coco17_tpu-32.tar.gz\n",
" !tar -xf centernet_hg104_512x512_kpts_coco17_tpu-32.tar.gz\n",
" !mv centernet_hg104_512x512_kpts_coco17_tpu-32/checkpoint models/research/object_detection/test_data/\n",
"else:\n",
" !wget http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_hg104_512x512_coco17_tpu-8.tar.gz\n",
" !tar -xf centernet_hg104_512x512_coco17_tpu-8.tar.gz\n",
" !mv centernet_hg104_512x512_coco17_tpu-8/checkpoint models/research/object_detection/test_data/"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "4cni4SSocvP_"
},
"outputs": [],
"source": [
"pipeline_config = os.path.join('models/research/object_detection/configs/tf2/',\n",
" model_name + '.config')\n",
"model_dir = 'models/research/object_detection/test_data/checkpoint/'\n",
"\n",
"# Load pipeline config and build a detection model\n",
"configs = config_util.get_configs_from_pipeline_file(pipeline_config)\n",
"model_config = configs['model']\n",
"detection_model = model_builder.build(\n",
" model_config=model_config, is_training=False)\n",
"\n",
"# Restore checkpoint\n",
"ckpt = tf.compat.v2.train.Checkpoint(\n",
" model=detection_model)\n",
"ckpt.restore(os.path.join(model_dir, 'ckpt-0')).expect_partial()\n",
"\n",
"def get_model_detection_function(model):\n",
" \"\"\"Get a tf.function for detection.\"\"\"\n",
"\n",
" @tf.function\n",
" def detect_fn(image):\n",
" \"\"\"Detect objects in image.\"\"\"\n",
"\n",
" image, shapes = model.preprocess(image)\n",
" prediction_dict = model.predict(image, shapes)\n",
" detections = model.postprocess(prediction_dict, shapes)\n",
"\n",
" return detections, prediction_dict, tf.reshape(shapes, [-1])\n",
"\n",
" return detect_fn\n",
"\n",
"detect_fn = get_model_detection_function(detection_model)"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "NKtD0IeclbL5"
},
"source": [
"# Load label map data (for plotting).\n",
"\n",
"Label maps correspond index numbers to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "5mucYUS6exUJ"
},
"outputs": [],
"source": [
"label_map_path = configs['eval_input_config'].label_map_path\n",
"label_map = label_map_util.load_labelmap(label_map_path)\n",
"categories = label_map_util.convert_label_map_to_categories(\n",
" label_map,\n",
" max_num_classes=label_map_util.get_max_label_map_index(label_map),\n",
" use_display_name=True)\n",
"category_index = label_map_util.create_category_index(categories)\n",
"label_map_dict = label_map_util.get_label_map_dict(label_map, use_display_name=True)"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "RLusV1o-mAx8"
},
"source": [
"### Putting everything together!\n",
"\n",
"Run the below code which loads an image, runs it through the detection model and visualizes the detection results, including the keypoints.\n",
"\n",
"Note that this will take a long time (several minutes) the first time you run this code due to tf.function's trace-compilation --- on subsequent runs (e.g. on new images), things will be faster.\n",
"\n",
"Here are some simple things to try out if you are curious:\n",
"* Try running inference on your own images (local paths work)\n",
"* Modify some of the input images and see if detection still works. Some simple things to try out here (just uncomment the relevant portions of code) include flipping the image horizontally, or converting to grayscale (note that we still expect the input image to have 3 channels).\n",
"* Print out `detections['detection_boxes']` and try to match the box locations to the boxes in the image. Notice that coordinates are given in normalized form (i.e., in the interval [0, 1]).\n",
"* Set min_score_thresh to other values (between 0 and 1) to allow more detections in or to filter out more detections.\n",
"\n",
"Note that you can run this cell repeatedly without rerunning earlier cells.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "vr_Fux-gfaG9"
},
"outputs": [],
"source": [
"image_dir = 'models/research/object_detection/test_images/'\n",
"image_path = os.path.join(image_dir, 'image2.jpg')\n",
"image_np = load_image_into_numpy_array(image_path)\n",
"\n",
"# Things to try:\n",
"# Flip horizontally\n",
"# image_np = np.fliplr(image_np).copy()\n",
"\n",
"# Convert image to grayscale\n",
"# image_np = np.tile(\n",
"# np.mean(image_np, 2, keepdims=True), (1, 1, 3)).astype(np.uint8)\n",
"\n",
"input_tensor = tf.convert_to_tensor(\n",
" np.expand_dims(image_np, 0), dtype=tf.float32)\n",
"detections, predictions_dict, shapes = detect_fn(input_tensor)\n",
"\n",
"label_id_offset = 1\n",
"image_np_with_detections = image_np.copy()\n",
"\n",
"# Use keypoints if available in detections\n",
"keypoints, keypoint_scores = None, None\n",
"if 'detection_keypoints' in detections:\n",
" keypoints = detections['detection_keypoints'][0].numpy()\n",
" keypoint_scores = detections['detection_keypoint_scores'][0].numpy()\n",
"\n",
"viz_utils.visualize_boxes_and_labels_on_image_array(\n",
" image_np_with_detections,\n",
" detections['detection_boxes'][0].numpy(),\n",
" (detections['detection_classes'][0].numpy() + label_id_offset).astype(int),\n",
" detections['detection_scores'][0].numpy(),\n",
" category_index,\n",
" use_normalized_coordinates=True,\n",
" max_boxes_to_draw=200,\n",
" min_score_thresh=.30,\n",
" agnostic_mode=False,\n",
" keypoints=keypoints,\n",
" keypoint_scores=keypoint_scores,\n",
" keypoint_edges=get_keypoint_tuples(configs['eval_config']))\n",
"\n",
"plt.figure(figsize=(12,16))\n",
"plt.imshow(image_np_with_detections)\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "lYnOxprty3TD"
},
"source": [
"## Digging into the model's intermediate predictions\n",
"\n",
"For this part we will assume that the detection model is a CenterNet model following Zhou et al (https://arxiv.org/abs/1904.07850). And more specifically, we will assume that `detection_model` is of type `meta_architectures.center_net_meta_arch.CenterNetMetaArch`.\n",
"\n",
"As one of its intermediate predictions, CenterNet produces a heatmap of box centers for each class (for example, it will produce a heatmap whose size is proportional to that of the image that lights up at the center of each, e.g., \"zebra\"). In the following, we will visualize these intermediate class center heatmap predictions."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "xBgYgSGMhHVi"
},
"outputs": [],
"source": [
"if detection_model.__class__.__name__ != 'CenterNetMetaArch':\n",
" raise AssertionError('The meta-architecture for this section '\n",
" 'is assumed to be CenterNetMetaArch!')\n",
"\n",
"def get_heatmap(predictions_dict, class_name):\n",
" \"\"\"Grabs class center logits and apply inverse logit transform.\n",
"\n",
" Args:\n",
" predictions_dict: dictionary of tensors containing a `object_center`\n",
" field of shape [1, heatmap_width, heatmap_height, num_classes]\n",
" class_name: string name of category (e.g., `horse`)\n",
"\n",
" Returns:\n",
" heatmap: 2d Tensor heatmap representing heatmap of centers for a given class\n",
" (For CenterNet, this is 128x128 or 256x256) with values in [0,1]\n",
" \"\"\"\n",
" class_index = label_map_dict[class_name]\n",
" class_center_logits = predictions_dict['object_center'][0]\n",
" class_center_logits = class_center_logits[0][\n",
" :, :, class_index - label_id_offset]\n",
" heatmap = tf.exp(class_center_logits) / (tf.exp(class_center_logits) + 1)\n",
" return heatmap\n",
"\n",
"def unpad_heatmap(heatmap, image_np):\n",
" \"\"\"Reshapes/unpads heatmap appropriately.\n",
"\n",
" Reshapes/unpads heatmap appropriately to match image_np.\n",
"\n",
" Args:\n",
" heatmap: Output of `get_heatmap`, a 2d Tensor\n",
" image_np: uint8 numpy array with shape (img_height, img_width, 3). Note\n",
" that due to padding, the relationship between img_height and img_width\n",
" might not be a simple scaling.\n",
"\n",
" Returns:\n",
" resized_heatmap_unpadded: a resized heatmap (2d Tensor) that is the same\n",
" size as `image_np`\n",
" \"\"\"\n",
" heatmap = tf.tile(tf.expand_dims(heatmap, 2), [1, 1, 3]) * 255\n",
" pre_strided_size = detection_model._stride * heatmap.shape[0]\n",
" resized_heatmap = tf.image.resize(\n",
" heatmap, [pre_strided_size, pre_strided_size],\n",
" method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n",
" resized_heatmap_unpadded = tf.slice(resized_heatmap, begin=[0,0,0], size=shapes)\n",
" return tf.image.resize(\n",
" resized_heatmap_unpadded,\n",
" [image_np.shape[0], image_np.shape[1]],\n",
" method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)[:,:,0]\n",
"\n",
"\n",
"class_name = 'kite'\n",
"heatmap = get_heatmap(predictions_dict, class_name)\n",
"resized_heatmap_unpadded = unpad_heatmap(heatmap, image_np)\n",
"plt.figure(figsize=(12,16))\n",
"plt.imshow(image_np_with_detections)\n",
"plt.imshow(resized_heatmap_unpadded, alpha=0.7,vmin=0, vmax=160, cmap='viridis')\n",
"plt.title('Object center heatmap (class: ' + class_name + ')')\n",
"plt.show()\n",
"\n",
"class_name = 'person'\n",
"heatmap = get_heatmap(predictions_dict, class_name)\n",
"resized_heatmap_unpadded = unpad_heatmap(heatmap, image_np)\n",
"plt.figure(figsize=(12,16))\n",
"plt.imshow(image_np_with_detections)\n",
"plt.imshow(resized_heatmap_unpadded, alpha=0.7,vmin=0, vmax=160, cmap='viridis')\n",
"plt.title('Object center heatmap (class: ' + class_name + ')')\n",
"plt.show()"
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [],
"name": "inference_tf2_colab.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
...
@@ -71,7 +71,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -95,7 +95,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -118,7 +118,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -149,7 +149,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -164,7 +164,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -189,7 +189,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -224,7 +224,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -249,7 +249,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -300,7 +300,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -319,7 +319,6 @@
" model_dir = pathlib.Path(model_dir)/\"saved_model\"\n",
"\n",
" model = tf.saved_model.load(str(model_dir))\n",
- " model = model.signatures['serving_default']\n",
"\n",
" return model"
]
...
@@ -337,7 +336,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -362,7 +361,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -398,7 +397,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -417,12 +416,12 @@
"id": "yN1AYfAEJIGp"
},
"source": [
- "Check the model's input signature, it expects a batch of 3-color images of type uint8: "
+ "Check the model's input signature, it expects a batch of 3-color images of type uint8:"
]
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -430,7 +429,7 @@
},
"outputs": [],
"source": [
- "print(detection_model.inputs)"
+ "print(detection_model.signatures['serving_default'].inputs)"
]
},
{
...
@@ -445,7 +444,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -453,12 +452,12 @@
},
"outputs": [],
"source": [
- "detection_model.output_dtypes"
+ "detection_model.signatures['serving_default'].output_dtypes"
]
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -466,7 +465,7 @@
},
"outputs": [],
"source": [
- "detection_model.output_shapes"
+ "detection_model.signatures['serving_default'].output_shapes"
]
},
{
...
@@ -481,7 +480,7 @@
},
{
"cell_type": "code",
- "execution_count": 0,
+ "execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
...
@@ -497,7 +496,8 @@
" input_tensor = input_tensor[tf.newaxis,...]\n", " input_tensor = input_tensor[tf.newaxis,...]\n",
"\n", "\n",
" # Run inference\n", " # Run inference\n",
" output_dict = model(input_tensor)\n", " model_fn = model.signatures['serving_default']\n",
" output_dict = model_fn(input_tensor)\n",
"\n", "\n",
" # All outputs are batches tensors.\n", " # All outputs are batches tensors.\n",
" # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n", " # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n",
...@@ -535,7 +535,7 @@ ...@@ -535,7 +535,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 0, "execution_count": null,
"metadata": { "metadata": {
"colab": {}, "colab": {},
"colab_type": "code", "colab_type": "code",
...@@ -565,7 +565,7 @@ ...@@ -565,7 +565,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 0, "execution_count": null,
"metadata": { "metadata": {
"colab": {}, "colab": {},
"colab_type": "code", "colab_type": "code",
...@@ -589,7 +589,7 @@ ...@@ -589,7 +589,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 0, "execution_count": null,
"metadata": { "metadata": {
"colab": {}, "colab": {},
"colab_type": "code", "colab_type": "code",
...@@ -613,7 +613,7 @@ ...@@ -613,7 +613,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 0, "execution_count": null,
"metadata": { "metadata": {
"colab": {}, "colab": {},
"colab_type": "code", "colab_type": "code",
...@@ -626,7 +626,7 @@ ...@@ -626,7 +626,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 0, "execution_count": null,
"metadata": { "metadata": {
"colab": {}, "colab": {},
"colab_type": "code", "colab_type": "code",
...@@ -637,19 +637,6 @@ ...@@ -637,19 +637,6 @@
"for image_path in TEST_IMAGE_PATHS:\n", "for image_path in TEST_IMAGE_PATHS:\n",
" show_inference(masking_model, image_path)" " show_inference(masking_model, image_path)"
] ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {},
- "colab_type": "code",
- "id": "nLlmm9JojEKm"
- },
- "outputs": [],
- "source": [
- ""
- ]
}
],
"metadata": {
...
@@ -663,6 +650,10 @@
"name": "object_detection_tutorial.ipynb",
"private_outputs": true,
"provenance": [
+ {
+ "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/colab_tutorials/object_detection_tutorial.ipynb",
+ "timestamp": 1594335690840
+ },
{
"file_id": "1LNYL6Zsn9Xlil2CVNOTsgDZQSBKeOjCh",
"timestamp": 1566498233247
...
@@ -699,8 +690,7 @@
"file_id": "https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb",
"timestamp": 1556150293326
}
- ],
- "version": "0.3.2"
+ ]
},
"kernelspec": {
"display_name": "Python 3",
...
# CenterNet meta-architecture from the "Objects as Points" [2] paper with the
# hourglass[1] backbone.
# [1]: https://arxiv.org/abs/1603.06937
# [2]: https://arxiv.org/abs/1904.07850
# Trained on COCO, initialized from an ExtremeNet detection checkpoint
# Train on TPU-32 v3
#
# Achieves 44.6 mAP on COCO17 Val
model {
center_net {
num_classes: 90
feature_extractor {
type: "hourglass_104"
bgr_ordering: true
channel_means: [104.01362025, 114.03422265, 119.9165958 ]
channel_stds: [73.6027665 , 69.89082075, 70.9150767 ]
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 1024
max_dimension: 1024
pad_to_max_dimension: true
}
}
object_detection_task {
task_loss_weight: 1.0
offset_loss_weight: 1.0
scale_loss_weight: 0.1
localization_loss {
l1_localization_loss {
}
}
}
object_center_params {
object_center_loss_weight: 1.0
min_box_overlap_iou: 0.7
max_box_predictions: 100
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 2.0
beta: 4.0
}
}
}
}
}
train_config: {
batch_size: 128
num_steps: 50000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_adjust_hue {
}
}
data_augmentation_options {
random_adjust_contrast {
}
}
data_augmentation_options {
random_adjust_saturation {
}
}
data_augmentation_options {
random_adjust_brightness {
}
}
data_augmentation_options {
random_square_crop_by_scale {
scale_min: 0.6
scale_max: 1.3
}
}
optimizer {
adam_optimizer: {
epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default.
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: 1e-3
total_steps: 50000
warmup_learning_rate: 2.5e-4
warmup_steps: 5000
}
}
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-1"
fine_tune_checkpoint_type: "detection"
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
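The cosine_decay_learning_rate block above fully determines the schedule. As a rough sketch in plain Python (mirroring the shape of, but not copied from, the Object Detection API's implementation):

import math

def cosine_decay_with_warmup(step, base_lr=1e-3, total_steps=50000,
                             warmup_lr=2.5e-4, warmup_steps=5000):
  # Default values above are the ones from this config.
  if step < warmup_steps:
    # Linear ramp from warmup_lr at step 0 to base_lr at warmup_steps.
    return warmup_lr + (base_lr - warmup_lr) * step / warmup_steps
  # Cosine decay from base_lr down to 0 over the remaining steps.
  progress = (step - warmup_steps) / (total_steps - warmup_steps)
  return 0.5 * base_lr * (1.0 + math.cos(math.pi * progress))

print(cosine_decay_with_warmup(0))      # 0.00025 (warmup start)
print(cosine_decay_with_warmup(5000))   # 0.001   (warmup end)
print(cosine_decay_with_warmup(50000))  # ~0.0    (fully decayed)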
# CenterNet meta-architecture from the "Objects as Points" [2] paper with the
# hourglass[1] backbone.
# [1]: https://arxiv.org/abs/1603.06937
# [2]: https://arxiv.org/abs/1904.07850
# Trained on COCO, initialized from an ExtremeNet detection checkpoint
# Train on TPU-8
#
# Achieves 41.9 mAP on COCO17 Val
model {
center_net {
num_classes: 90
feature_extractor {
type: "hourglass_104"
bgr_ordering: true
channel_means: [104.01362025, 114.03422265, 119.9165958 ]
channel_stds: [73.6027665 , 69.89082075, 70.9150767 ]
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
object_detection_task {
task_loss_weight: 1.0
offset_loss_weight: 1.0
scale_loss_weight: 0.1
localization_loss {
l1_localization_loss {
}
}
}
object_center_params {
object_center_loss_weight: 1.0
min_box_overlap_iou: 0.7
max_box_predictions: 100
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 2.0
beta: 4.0
}
}
}
}
}
train_config: {
batch_size: 128
num_steps: 140000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_crop_image {
min_aspect_ratio: 0.5
max_aspect_ratio: 1.7
random_coef: 0.25
}
}
data_augmentation_options {
random_adjust_hue {
}
}
data_augmentation_options {
random_adjust_contrast {
}
}
data_augmentation_options {
random_adjust_saturation {
}
}
data_augmentation_options {
random_adjust_brightness {
}
}
data_augmentation_options {
random_absolute_pad_image {
max_height_padding: 200
max_width_padding: 200
pad_color: [0, 0, 0]
}
}
optimizer {
adam_optimizer: {
epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default.
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 1e-3
schedule {
step: 90000
learning_rate: 1e-4
}
schedule {
step: 120000
learning_rate: 1e-5
}
}
}
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-1"
fine_tune_checkpoint_type: "detection"
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
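Unlike the cosine schedule in the previous config, manual_step_learning_rate is simply piecewise constant; a sketch of its effect:

def manual_step_lr(step):
  # Steps and rates taken from the config above.
  if step < 90000:
    return 1e-3  # initial_learning_rate
  if step < 120000:
    return 1e-4
  return 1e-5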
# CenterNet meta-architecture from the "Objects as Points" [1] paper
# with the ResNet-v1-101 FPN backbone.
# [1]: https://arxiv.org/abs/1904.07850
# Train on TPU-8
#
# Achieves 34.18 mAP on COCO17 Val
model {
center_net {
num_classes: 90
feature_extractor {
type: "resnet_v2_101"
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
object_detection_task {
task_loss_weight: 1.0
offset_loss_weight: 1.0
scale_loss_weight: 0.1
localization_loss {
l1_localization_loss {
}
}
}
object_center_params {
object_center_loss_weight: 1.0
min_box_overlap_iou: 0.7
max_box_predictions: 100
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 2.0
beta: 4.0
}
}
}
}
}
train_config: {
batch_size: 128
num_steps: 140000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_crop_image {
min_aspect_ratio: 0.5
max_aspect_ratio: 1.7
random_coef: 0.25
}
}
data_augmentation_options {
random_adjust_hue {
}
}
data_augmentation_options {
random_adjust_contrast {
}
}
data_augmentation_options {
random_adjust_saturation {
}
}
data_augmentation_options {
random_adjust_brightness {
}
}
data_augmentation_options {
random_absolute_pad_image {
max_height_padding: 200
max_width_padding: 200
pad_color: [0, 0, 0]
}
}
optimizer {
adam_optimizer: {
epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default.
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 1e-3
schedule {
step: 90000
learning_rate: 1e-4
}
schedule {
step: 120000
learning_rate: 1e-5
}
}
}
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/weights-1"
fine_tune_checkpoint_type: "classification"
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
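
# Both CenterNet configs above share the same manual_step_learning_rate:
# 1e-3 until step 90000, 1e-4 until 120000, 1e-5 afterwards. (They differ
# in initialization: the hourglass config restores a detection checkpoint,
# this one only a classification backbone.) A sketch of the equivalent
# Keras schedule and optimizer, assuming TF 2.x:
import tensorflow as tf

lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[90000, 120000],
    values=[1e-3, 1e-4, 1e-5])

# epsilon is pinned to 1e-7 to match tf.keras.optimizers.Adam's default,
# as the inline comment in the config notes.
optimizer = tf.keras.optimizers.Adam(learning_rate=lr, epsilon=1e-7)
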
# Faster R-CNN with Resnet-101 (v1),
# w/high res inputs, long training schedule
# Trained on COCO, initialized from Imagenet classification checkpoint
#
# Train on TPU-8
#
# Achieves 37.1 mAP on COCO17 val
model {
faster_rcnn {
num_classes: 90
image_resizer {
fixed_shape_resizer {
width: 1024
height: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet101_keras'
batch_norm_trainable: true
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
share_box_across_classes: true
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
use_static_shapes: true
use_matmul_crop_and_resize: true
clip_anchors_to_image: true
use_static_balanced_label_sampler: true
use_matmul_gather_in_matcher: true
}
}
train_config: {
batch_size: 64
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 100000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .04
total_steps: 100000
warmup_learning_rate: .013333
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet101.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_adjust_hue {
}
}
data_augmentation_options {
random_adjust_contrast {
}
}
data_augmentation_options {
random_adjust_saturation {
}
}
data_augmentation_options {
random_square_crop_by_scale {
scale_min: 0.6
scale_max: 1.3
}
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
use_bfloat16: true # works only on TPUs
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
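
# The TPU Faster R-CNN configs use cosine_decay_learning_rate with a
# linear warmup (base .04, 2000 warmup steps, as in the train_config
# above). A sketch of the resulting lr(step) curve, assuming the usual
# warmup-then-cosine form:
import math

def cosine_lr(step, base=0.04, total_steps=100000,
              warmup_lr=0.013333, warmup_steps=2000):
    if step < warmup_steps:
        # Linear ramp from warmup_lr up to base over warmup_steps.
        return warmup_lr + (base - warmup_lr) * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return 0.5 * base * (1.0 + math.cos(math.pi * progress))

# cosine_lr(0) == 0.013333, cosine_lr(2000) == 0.04, and the rate then
# decays smoothly toward 0 at step 100000.
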
# Faster R-CNN with Resnet-101 (v1)
# Trained on COCO, initialized from Imagenet classification checkpoint
#
# Train on TPU-8
#
# Achieves 31.8 mAP on COCO17 val
model {
faster_rcnn {
num_classes: 90
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 640
max_dimension: 640
pad_to_max_dimension: true
}
}
feature_extractor {
type: 'faster_rcnn_resnet101_keras'
batch_norm_trainable: true
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
share_box_across_classes: true
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
use_static_shapes: true
use_matmul_crop_and_resize: true
clip_anchors_to_image: true
use_static_balanced_label_sampler: true
use_matmul_gather_in_matcher: true
}
}
train_config: {
batch_size: 64
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 25000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .04
total_steps: 25000
warmup_learning_rate: .013333
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet101.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
use_bfloat16: true # works only on TPUs
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
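
# With min_dimension == max_dimension == 640, keep_aspect_ratio_resizer
# scales the short side toward 640 unless that pushes the long side past
# 640, in which case the long side is pinned to 640; pad_to_max_dimension
# then zero-pads to a square canvas. A sketch of that arithmetic,
# assuming round-to-nearest output sizes:
def resized_shape(h, w, min_dim=640, max_dim=640):
    scale = min_dim / min(h, w)
    if scale * max(h, w) > max_dim:
        scale = max_dim / max(h, w)
    return round(h * scale), round(w * scale)  # padded to (max_dim, max_dim)

# e.g. an 800x1200 image resizes to 427x640, then pads to 640x640.
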
# Faster R-CNN with Resnet-101 (v1),
# Initialized from Imagenet classification checkpoint
#
# Train on GPU-8
#
# Achieves 36.6 mAP on COCO17 val
model {
faster_rcnn {
num_classes: 90
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 800
max_dimension: 1333
pad_to_max_dimension: true
}
}
feature_extractor {
type: 'faster_rcnn_resnet101_keras'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 16
num_steps: 200000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: 0.01
total_steps: 200000
warmup_learning_rate: 0.0
warmup_steps: 5000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet101.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_adjust_hue {
}
}
data_augmentation_options {
random_adjust_contrast {
}
}
data_augmentation_options {
random_adjust_saturation {
}
}
data_augmentation_options {
random_square_crop_by_scale {
scale_min: 0.6
scale_max: 1.3
}
}
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
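
# The grid_anchor_generator above tiles 12 anchors (4 scales x 3 aspect
# ratios) every 16 pixels. A sketch of the anchor shapes, assuming the
# generator's default 256x256 base anchor size:
import math

def anchor_shapes(scales=(0.25, 0.5, 1.0, 2.0),
                  aspect_ratios=(0.5, 1.0, 2.0), base=256.0):
    shapes = []
    for s in scales:
        for a in aspect_ratios:                   # a = width / height
            shapes.append((s * base / math.sqrt(a),   # height
                           s * base * math.sqrt(a)))  # width
    return shapes

# e.g. scale 1.0 / aspect 1.0 gives a 256x256 anchor, while scale 0.25 /
# aspect 2.0 gives roughly 45x91 (h x w).
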
# Faster R-CNN with Resnet-152 (v1)
# w/high res inputs, long training schedule
# Trained on COCO, initialized from Imagenet classification checkpoint
#
# Train on TPU-8
#
# Achieves 37.6 mAP on COCO17 val
model {
faster_rcnn {
num_classes: 90
image_resizer {
fixed_shape_resizer {
width: 1024
height: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet152_keras'
batch_norm_trainable: true
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
share_box_across_classes: true
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
use_static_shapes: true
use_matmul_crop_and_resize: true
clip_anchors_to_image: true
use_static_balanced_label_sampler: true
use_matmul_gather_in_matcher: true
}
}
train_config: {
batch_size: 64
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 100000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .04
total_steps: 100000
warmup_learning_rate: .013333
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet152.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_adjust_hue {
}
}
data_augmentation_options {
random_adjust_contrast {
}
}
data_augmentation_options {
random_adjust_saturation {
}
}
data_augmentation_options {
random_square_crop_by_scale {
scale_min: 0.6
scale_max: 1.3
}
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
use_bfloat16: true # works only on TPUs
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
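
# After the SOFTMAX score conversion, the second stage runs per-class
# NMS: IoU 0.6, at most 100 boxes per class and 300 total. A minimal
# sketch with TF ops, assuming TF 2.x and [ymin, xmin, ymax, xmax] boxes:
import tensorflow as tf

def per_class_nms(boxes, class_scores, iou=0.6, per_class=100, total=300):
    """boxes: [N, 4]; class_scores: [N, num_classes] softmax outputs."""
    kept_scores, kept_idx = [], []
    for c in range(class_scores.shape[-1]):
        scores = class_scores[:, c]
        idx = tf.image.non_max_suppression(
            boxes, scores, max_output_size=per_class,
            iou_threshold=iou, score_threshold=0.0)
        kept_idx.append(idx)
        kept_scores.append(tf.gather(scores, idx))
    scores = tf.concat(kept_scores, axis=0)
    idx = tf.concat(kept_idx, axis=0)
    # Keep the `total` highest-scoring detections across all classes.
    top = tf.argsort(scores, direction='DESCENDING')[:total]
    return tf.gather(idx, top), tf.gather(scores, top)
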
# Faster R-CNN with Resnet-152 (v1)
# Trained on COCO, initialized from Imagenet classification checkpoint
#
# Train on TPU-8
#
# Achieves 32.4 mAP on COCO17 val
model {
faster_rcnn {
num_classes: 90
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 640
max_dimension: 640
pad_to_max_dimension: true
}
}
feature_extractor {
type: 'faster_rcnn_resnet152_keras'
batch_norm_trainable: true
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
share_box_across_classes: true
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
use_static_shapes: true
use_matmul_crop_and_resize: true
clip_anchors_to_image: true
use_static_balanced_label_sampler: true
use_matmul_gather_in_matcher: true
}
}
train_config: {
batch_size: 64
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 25000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .04
total_steps: 25000
warmup_learning_rate: .013333
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet152.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
use_bfloat16: true # works only on TPUs
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
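
# initial_crop_size: 14 followed by a 2x2, stride-2 max pool produces the
# classic 7x7 ROI feature that feeds the box head; use_matmul_crop_and_resize
# swaps the gather-based crop kernel for a matmul formulation of the same
# operation. A sketch of the crop-and-pool step, assuming TF 2.x:
import tensorflow as tf

def roi_features(feature_map, boxes, box_indices, crop=14):
    """feature_map: [B, H, W, C]; boxes: [N, 4] in normalized coordinates."""
    crops = tf.image.crop_and_resize(
        feature_map, boxes, box_indices, crop_size=[crop, crop])
    return tf.nn.max_pool2d(crops, ksize=2, strides=2, padding='VALID')

# For crop=14 the result is [N, 7, 7, C].
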
# Faster R-CNN with Resnet-152 (v1),
# Initialized from Imagenet classification checkpoint
#
# Train on GPU-8
#
# Achieves 37.3 mAP on COCO17 val
model {
faster_rcnn {
num_classes: 90
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 800
max_dimension: 1333
pad_to_max_dimension: true
}
}
feature_extractor {
type: 'faster_rcnn_resnet152_keras'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 16
num_steps: 200000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: 0.01
total_steps: 200000
warmup_learning_rate: 0.0
warmup_steps: 5000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet152.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_adjust_hue {
}
}
data_augmentation_options {
random_adjust_contrast {
}
}
data_augmentation_options {
random_adjust_saturation {
}
}
data_augmentation_options {
random_square_crop_by_scale {
scale_min: 0.6
scale_max: 1.3
}
}
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
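
# The GPU configs clip gradients to a global norm of 10.0 before the
# momentum update. A minimal custom-training-step sketch, assuming TF 2.x:
import tensorflow as tf

optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9)

def train_step(model, loss_fn, x, y, clip_norm=10.0):
    with tf.GradientTape() as tape:
        loss = loss_fn(y, model(x, training=True))
    grads = tape.gradient(loss, model.trainable_variables)
    grads, _ = tf.clip_by_global_norm(grads, clip_norm)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss
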
# Faster R-CNN with Resnet-50 (v1),
# w/high res inputs, long training schedule
# Trained on COCO, initialized from Imagenet classification checkpoint
#
# Train on TPU-8
#
# Achieves 31.0 mAP on COCO17 val
model {
faster_rcnn {
num_classes: 90
image_resizer {
fixed_shape_resizer {
width: 1024
height: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet50_keras'
batch_norm_trainable: true
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
share_box_across_classes: true
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
use_static_shapes: true
use_matmul_crop_and_resize: true
clip_anchors_to_image: true
use_static_balanced_label_sampler: true
use_matmul_gather_in_matcher: true
}
}
train_config: {
batch_size: 64
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 100000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .04
total_steps: 100000
warmup_learning_rate: .013333
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet50.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_adjust_hue {
}
}
data_augmentation_options {
random_adjust_contrast {
}
}
data_augmentation_options {
random_adjust_saturation {
}
}
data_augmentation_options {
random_square_crop_by_scale {
scale_min: 0.6
scale_max: 1.3
}
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
use_bfloat16: true # works only on TPUs
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
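
# fine_tune_checkpoint_type: "classification" restores only backbone
# weights from the ImageNet checkpoint; the RPN and box heads train from
# scratch. A small sketch for inspecting what a candidate checkpoint
# contains before wiring it in, assuming TF 2.x (the path is the same
# placeholder used above):
import tensorflow as tf

reader = tf.train.load_checkpoint("PATH_TO_BE_CONFIGURED/resnet50.ckpt-1")
for name, shape in sorted(reader.get_variable_to_shape_map().items()):
    print(name, shape)
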
# Faster R-CNN with Resnet-50 (v1) with 640x640 input resolution
# Trained on COCO, initialized from Imagenet classification checkpoint
#
# Train on TPU-8
#
# Achieves 29.3 mAP on COCO17 Val
model {
faster_rcnn {
num_classes: 90
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 640
max_dimension: 640
pad_to_max_dimension: true
}
}
feature_extractor {
type: 'faster_rcnn_resnet50_keras'
batch_norm_trainable: true
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
share_box_across_classes: true
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
use_static_shapes: true
use_matmul_crop_and_resize: true
clip_anchors_to_image: true
use_static_balanced_label_sampler: true
use_matmul_gather_in_matcher: true
}
}
train_config: {
batch_size: 64
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 25000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .04
total_steps: 25000
warmup_learning_rate: .013333
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet50.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
use_bfloat16: true # works only on TPUs
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
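
# random_horizontal_flip mirrors the image and remaps each box: with
# normalized [ymin, xmin, ymax, xmax] coordinates, the new xmin is
# 1 - xmax and the new xmax is 1 - xmin. A minimal sketch, assuming TF 2.x:
import tensorflow as tf

def flip_example(image, boxes):
    """image: [H, W, 3]; boxes: [N, 4] normalized [ymin, xmin, ymax, xmax]."""
    image = tf.image.flip_left_right(image)
    ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
    boxes = tf.stack([ymin, 1.0 - xmax, ymax, 1.0 - xmin], axis=1)
    return image, boxes
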
# Faster R-CNN with Resnet-50 (v1),
# Initialized from Imagenet classification checkpoint
#
# Train on GPU-8
#
# Achieves 31.4 mAP on COCO17 val
model {
faster_rcnn {
num_classes: 90
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 800
max_dimension: 1333
pad_to_max_dimension: true
}
}
feature_extractor {
type: 'faster_rcnn_resnet50_keras'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 16
num_steps: 200000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: 0.01
total_steps: 200000
warmup_learning_rate: 0.0
warmup_steps: 5000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet50.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_adjust_hue {
}
}
data_augmentation_options {
random_adjust_contrast {
}
}
data_augmentation_options {
random_adjust_saturation {
}
}
data_augmentation_options {
random_square_crop_by_scale {
scale_min: 0.6
scale_max: 1.3
}
}
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
batch_size: 1;
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
}
# Mask R-CNN with Inception Resnet v2 (no atrous)
# Sync-trained on COCO (with 8 GPUs) with batch size 16 (1024x1024 resolution)
# Initialized from Imagenet classification checkpoint
#
# Train on GPU-8
#
# Achieves 40.4 box mAP and 35.5 mask mAP on COCO17 val
model {
faster_rcnn {
number_of_stages: 3
num_classes: 90
image_resizer {
fixed_shape_resizer {
height: 1024
width: 1024
}
}
feature_extractor {
type: 'faster_rcnn_inception_resnet_v2_keras'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 17
maxpool_kernel_size: 1
maxpool_stride: 1
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
mask_height: 33
mask_width: 33
mask_prediction_conv_depth: 0
mask_prediction_num_conv_layers: 4
conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
predict_instance_masks: true
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
second_stage_mask_prediction_loss_weight: 4.0
resize_masks: false
}
}
train_config: {
batch_size: 16
num_steps: 200000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: 0.008
total_steps: 200000
warmup_learning_rate: 0.0
warmup_steps: 5000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/inception_resnet_v2.ckpt-1"
fine_tune_checkpoint_type: "classification"
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord"
}
load_instance_masks: true
mask_type: PNG_MASKS
}
eval_config: {
metrics_set: "coco_detection_metrics"
metrics_set: "coco_mask_metrics"
eval_instance_masks: true
use_moving_averages: false
batch_size: 1
include_metrics_per_category: true
}
eval_input_reader: {
label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
}
load_instance_masks: true
mask_type: PNG_MASKS
}
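
# number_of_stages: 3 adds the mask-prediction pass on top of the usual
# two Faster R-CNN stages; the head emits 33x33 mask logits per detection
# (mask_height/mask_width above), and resize_masks: false keeps them at
# that size until postprocessing pastes them into each detected box. A
# sketch of that pasting step, assuming TF 2.x and a 0.5 binarization
# threshold:
import tensorflow as tf

def paste_mask(mask_probs, box_h, box_w, threshold=0.5):
    """mask_probs: [33, 33] mask probabilities for one detection."""
    mask = tf.image.resize(mask_probs[..., tf.newaxis], [box_h, box_w])
    return tf.cast(mask[..., 0] > threshold, tf.uint8)

# Note the matching reader settings: load_instance_masks: true with
# mask_type: PNG_MASKS makes both input readers decode groundtruth masks.
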