Unverified commit e1ae37c4 authored by aquariusjay, committed by GitHub
Open-source FEELVOS model, which was developed by Paul Voigtlaender during his 2018 summer internship at Google. The work has been accepted to CVPR 2019. (#6274)
parent 5274ec8b
#!/bin/bash
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# This script is used to run local training on DAVIS 2017. Users can also
# adapt this script for their own use case. See eval.sh for an example of
# local inference with a pre-trained model.
#
# Note that this script runs local training with a single GPU and a smaller crop
# and batch size, while in the paper, we trained our models with 16 GPUs with
# --num_clones=2, --train_batch_size=6, --num_replicas=8,
# --training_number_of_steps=200000, --train_crop_size=465,
# --train_crop_size=465.
#
# Usage:
# # From the tensorflow/models/research/feelvos directory.
# sh ./train.sh
#
#
# Exit immediately if a command exits with a non-zero status.
set -e
# Move one-level up to tensorflow/models/research directory.
cd ..
# Update PYTHONPATH.
export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim:`pwd`/feelvos
# Set up the working environment.
CURRENT_DIR=$(pwd)
WORK_DIR="${CURRENT_DIR}/feelvos"
# Set up the working directories.
DATASET_DIR="datasets"
DAVIS_FOLDER="davis17"
DAVIS_DATASET="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/tfrecord"
EXP_FOLDER="exp/train"
TRAIN_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/${EXP_FOLDER}/train"
mkdir -p ${TRAIN_LOGDIR}
# Go to datasets folder and download and convert the DAVIS 2017 dataset.
DATASET_DIR="datasets"
cd "${WORK_DIR}/${DATASET_DIR}"
sh download_and_convert_davis17.sh
# Go to models folder and download and unpack the COCO pre-trained model.
MODELS_DIR="models"
mkdir -p "${WORK_DIR}/${MODELS_DIR}"
cd "${WORK_DIR}/${MODELS_DIR}"
if [ ! -d "xception_65_coco_pretrained" ]; then
wget http://download.tensorflow.org/models/xception_65_coco_pretrained_2018_10_02.tar.gz
tar -xvf xception_65_coco_pretrained_2018_10_02.tar.gz
rm xception_65_coco_pretrained_2018_10_02.tar.gz
fi
INIT_CKPT="${WORK_DIR}/${MODELS_DIR}/xception_65_coco_pretrained/x65-b2u1s2p-d48-2-3x256-sc-cr300k_init.ckpt"
# Go back to the original directory.
cd "${CURRENT_DIR}"
python "${WORK_DIR}"/train.py \
--dataset=davis_2017 \
--dataset_dir="${DAVIS_DATASET}" \
--train_logdir="${TRAIN_LOGDIR}" \
--tf_initial_checkpoint="${INIT_CKPT}" \
--logtostderr \
--atrous_rates=6 \
--atrous_rates=12 \
--atrous_rates=18 \
--decoder_output_stride=4 \
--model_variant=xception_65 \
--multi_grid=1 \
--multi_grid=1 \
--multi_grid=1 \
--output_stride=16 \
--weight_decay=0.00004 \
--num_clones=1 \
--train_batch_size=1 \
--train_crop_size=300 \
--train_crop_size=300
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for embedding utils."""
import unittest
import numpy as np
import tensorflow as tf
from feelvos.utils import embedding_utils
if embedding_utils.USE_CORRELATION_COST:
# pylint: disable=g-import-not-at-top
from correlation_cost.python.ops import correlation_cost_op
class EmbeddingUtilsTest(tf.test.TestCase):
def test_pairwise_distances(self):
x = np.arange(100, dtype=np.float32).reshape(20, 5)
y = np.arange(100, 200, dtype=np.float32).reshape(20, 5)
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g) as sess:
x = tf.constant(x)
y = tf.constant(y)
d1 = embedding_utils.pairwise_distances(x, y)
d2 = embedding_utils.pairwise_distances2(x, y)
d1_val, d2_val = sess.run([d1, d2])
self.assertAllClose(d1_val, d2_val)
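# For intuition, the two implementations above should agree with the standard
# expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2. A NumPy reference using
# that expansion (an illustrative assumption; embedding_utils may compute the
# matrix differently):
def _reference_pairwise_sq_distances(x, y):
  """x: [n, dim], y: [m, dim]; returns [n, m] squared distances."""
  xs = (x ** 2).sum(axis=1)[:, np.newaxis]  # [n, 1]
  ys = (y ** 2).sum(axis=1)[np.newaxis, :]  # [1, m]
  return xs - 2.0 * x.dot(y.T) + ys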
@unittest.skipIf(not embedding_utils.USE_CORRELATION_COST,
'depends on correlation_cost')
def test_correlation_cost_one_dimensional(self):
a = np.array([[[[1.0], [2.0]], [[3.0], [4.0]]]])
b = np.array([[[[2.0], [1.0]], [[4.0], [3.0]]]])
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g) as sess:
c = correlation_cost_op.correlation_cost(
a, b, kernel_size=1, max_displacement=1, stride_1=1, stride_2=1,
pad=1)
c = tf.squeeze(c, axis=0)
c_val = sess.run(c)
self.assertAllEqual(c_val.shape, (2, 2, 9))
for y in range(2):
for x in range(2):
for dy in range(-1, 2):
for dx in range(-1, 2):
a_slice = a[0, y, x, 0]
if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1:
b_slice = 0
else:
b_slice = b[0, y + dy, x + dx, 0]
expected = a_slice * b_slice
dy0 = dy + 1
dx0 = dx + 1
self.assertAlmostEqual(c_val[y, x, 3 * dy0 + dx0], expected)
@unittest.skipIf(not embedding_utils.USE_CORRELATION_COST,
'depends on correlation_cost')
def test_correlation_cost_two_dimensional(self):
a = np.array([[[[1.0, -5.0], [7.0, 2.0]], [[1.0, 3.0], [3.0, 4.0]]]])
b = np.array([[[[2.0, 1.0], [0.0, -9.0]], [[4.0, 3.0], [3.0, 1.0]]]])
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g) as sess:
c = correlation_cost_op.correlation_cost(
a, b, kernel_size=1, max_displacement=1, stride_1=1, stride_2=1,
pad=1)
c = tf.squeeze(c, axis=0)
c_val = sess.run(c)
self.assertAllEqual(c_val.shape, (2, 2, 9))
for y in range(2):
for x in range(2):
for dy in range(-1, 2):
for dx in range(-1, 2):
a_slice = a[0, y, x, :]
if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1:
b_slice = 0
else:
b_slice = b[0, y + dy, x + dx, :]
expected = (a_slice * b_slice).mean()
dy0 = dy + 1
dx0 = dx + 1
self.assertAlmostEqual(c_val[y, x, 3 * dy0 + dx0], expected)
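# A NumPy reference for what both correlation tests above expect from
# correlation_cost with kernel_size=1 and unit strides: channel
# d * (dy + k) + (dx + k) holds the mean over feature channels of
# a[y, x] * b[y + dy, x + dx], with out-of-bounds neighbors treated as zero
# (k = max_displacement, d = 2 * k + 1). This helper is illustrative and not
# part of correlation_cost itself.
def _reference_correlation_cost(a, b, max_displacement=1):
  """a, b: [height, width, channels] arrays; returns [height, width, d*d]."""
  k = max_displacement
  d = 2 * k + 1
  h, w, _ = a.shape
  out = np.zeros((h, w, d * d), dtype=a.dtype)
  for y in range(h):
    for x in range(w):
      for dy in range(-k, k + 1):
        for dx in range(-k, k + 1):
          yy, xx = y + dy, x + dx
          if 0 <= yy < h and 0 <= xx < w:
            out[y, x, d * (dy + k) + (dx + k)] = (a[y, x] * b[yy, xx]).mean()
  return out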
@unittest.skipIf(not embedding_utils.USE_CORRELATION_COST,
'depends on correlation_cost')
def test_local_pairwise_distances_one_dimensional(self):
a = np.array([[[1.0], [2.0]], [[3.0], [4.0]]])
b = np.array([[[2.0], [1.0]], [[4.0], [3.0]]])
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g) as sess:
a_tf = tf.constant(a, dtype=tf.float32)
b_tf = tf.constant(b, dtype=tf.float32)
d = embedding_utils.local_pairwise_distances(a_tf, b_tf,
max_distance=1)
d_val = sess.run(d)
for y in range(2):
for x in range(2):
for dy in range(-1, 2):
for dx in range(-1, 2):
a_slice = a[y, x, 0]
if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1:
expected = float('inf')
else:
b_slice = b[y + dy, x + dx, 0]
expected = (a_slice - b_slice) ** 2
dy0 = dy + 1
dx0 = dx + 1
self.assertAlmostEqual(d_val[y, x, 3 * dy0 + dx0], expected)
@unittest.skipIf(not embedding_utils.USE_CORRELATION_COST,
'depends on correlation_cost')
def test_local_pairwise_distances_shape(self):
a = np.zeros((4, 5, 2))
b = np.zeros((4, 5, 2))
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g) as sess:
a_tf = tf.constant(a, dtype=tf.float32)
b_tf = tf.constant(b, dtype=tf.float32)
d = embedding_utils.local_pairwise_distances(a_tf, b_tf, max_distance=4)
d_val = sess.run(d)
self.assertAllEqual(d_val.shape, (4, 5, 81))
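# The 81 channels checked above follow from the search window size: for
# max_distance k, each pixel is compared against a (2k+1) x (2k+1) grid of
# displacements, so num_channels = (2 * max_distance + 1) ** 2, i.e. k = 4
# gives 9 * 9 = 81 channels, and the k = 1 tests in this file give 9.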
@unittest.skipIf(not embedding_utils.USE_CORRELATION_COST,
'depends on correlation_cost')
def test_local_pairwise_distances_two_dimensional(self):
a = np.array([[[1.0, -5.0], [7.0, 2.0]], [[1.0, 3.0], [3.0, 4.0]]])
b = np.array([[[2.0, 1.0], [0.0, -9.0]], [[4.0, 3.0], [3.0, 1.0]]])
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g) as sess:
a_tf = tf.constant(a, dtype=tf.float32)
b_tf = tf.constant(b, dtype=tf.float32)
d = embedding_utils.local_pairwise_distances(a_tf, b_tf,
max_distance=1)
d_val = sess.run(d)
for y in range(2):
for x in range(2):
for dy in range(-1, 2):
for dx in range(-1, 2):
a_slice = a[y, x, :]
if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1:
expected = float('inf')
else:
b_slice = b[y + dy, x + dx, :]
expected = ((a_slice - b_slice) ** 2).sum()
dy0 = dy + 1
dx0 = dx + 1
self.assertAlmostEqual(d_val[y, x, 3 * dy0 + dx0], expected)
@unittest.skipIf(not embedding_utils.USE_CORRELATION_COST,
'depends on correlation_cost')
def test_local_previous_frame_nearest_neighbor_features_per_object(self):
prev_frame_embedding = np.array([[[1.0, -5.0], [7.0, 2.0]],
[[1.0, 3.0], [3.0, 4.0]]]) / 10
query_embedding = np.array([[[2.0, 1.0], [0.0, -9.0]],
[[4.0, 3.0], [3.0, 1.0]]]) / 10
prev_frame_labels = np.array([[[0], [1]], [[1], [0]]])
gt_ids = np.array([0, 1])
g = tf.Graph()
with g.as_default():
with self.test_session(graph=g) as sess:
prev_frame_embedding_tf = tf.constant(prev_frame_embedding,
dtype=tf.float32)
query_embedding_tf = tf.constant(query_embedding, dtype=tf.float32)
embu = embedding_utils
dists = (
embu.local_previous_frame_nearest_neighbor_features_per_object(
prev_frame_embedding_tf, query_embedding_tf,
prev_frame_labels, gt_ids, max_distance=1))
dists = tf.squeeze(dists, axis=4)
dists = tf.squeeze(dists, axis=0)
dists_val = sess.run(dists)
for obj_id in gt_ids:
for y in range(2):
for x in range(2):
curr_min = 1.0
for dy in range(-1, 2):
for dx in range(-1, 2):
# Attention: here we shift the prev frame embedding,
# not the query.
if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1:
continue
if prev_frame_labels[y + dy, x + dx, 0] != obj_id:
continue
prev_frame_slice = prev_frame_embedding[y + dy, x + dx, :]
query_frame_slice = query_embedding[y, x, :]
v_unnorm = ((prev_frame_slice - query_frame_slice) ** 2).sum()
v = ((1.0 / (1.0 + np.exp(-v_unnorm))) - 0.5) * 2
curr_min = min(curr_min, v)
expected = curr_min
self.assertAlmostEqual(dists_val[y, x, obj_id], expected,
places=5)
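# The expected values above pass the raw squared distance through a shifted,
# rescaled sigmoid, so identical embeddings map to 0 and far-apart embeddings
# approach 1. A standalone sketch of that squashing (the helper name is ours):
def _squash_distance(d):
  """Maps a non-negative squared distance into [0, 1)."""
  return (1.0 / (1.0 + np.exp(-d)) - 0.5) * 2.0
# _squash_distance(0.0) == 0.0, and _squash_distance(d) -> 1.0 as d -> inf.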
if __name__ == '__main__':
tf.test.main()
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for evaluations."""
import numpy as np
import PIL.Image
import tensorflow as tf
pascal_colormap = [
0, 0, 0,
0.5020, 0, 0,
0, 0.5020, 0,
0.5020, 0.5020, 0,
0, 0, 0.5020,
0.5020, 0, 0.5020,
0, 0.5020, 0.5020,
0.5020, 0.5020, 0.5020,
0.2510, 0, 0,
0.7529, 0, 0,
0.2510, 0.5020, 0,
0.7529, 0.5020, 0,
0.2510, 0, 0.5020,
0.7529, 0, 0.5020,
0.2510, 0.5020, 0.5020,
0.7529, 0.5020, 0.5020,
0, 0.2510, 0,
0.5020, 0.2510, 0,
0, 0.7529, 0,
0.5020, 0.7529, 0,
0, 0.2510, 0.5020,
0.5020, 0.2510, 0.5020,
0, 0.7529, 0.5020,
0.5020, 0.7529, 0.5020,
0.2510, 0.2510, 0]
def save_segmentation_with_colormap(filename, img):
"""Saves a segmentation with the pascal colormap as expected for DAVIS eval.
Args:
filename: Where to store the segmentation.
img: A numpy array of the segmentation to be saved.
"""
if img.shape[-1] == 1:
img = img[..., 0]
# Save with colormap.
colormap = (np.array(pascal_colormap) * 255).round().astype('uint8')
colormap_image = PIL.Image.new('P', (16, 16))
colormap_image.putpalette(colormap)
pil_image = PIL.Image.fromarray(img.astype('uint8'))
pil_image_with_colormap = pil_image.quantize(palette=colormap_image)
with tf.gfile.GFile(filename, 'wb') as f:
pil_image_with_colormap.save(f)
def save_embeddings(filename, embeddings):
with tf.gfile.GFile(filename, 'wb') as f:
np.save(f, embeddings)
def calculate_iou(pred_labels, ref_labels):
"""Calculates the intersection over union for binary segmentation.
Args:
pred_labels: predicted segmentation labels.
ref_labels: reference segmentation labels.
Returns:
The IoU between pred_labels and ref_labels
"""
if ref_labels.any():
i = np.logical_and(pred_labels, ref_labels).sum()
u = np.logical_or(pred_labels, ref_labels).sum()
return i.astype('float') / u
else:
if pred_labels.any():
return 0.0
else:
return 1.0
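# Example (illustrative): two 2x2 masks that overlap in exactly one pixel,
# with three foreground pixels in their union, give IoU = 1/3:
#   pred = np.array([[True, False], [True, False]])
#   ref = np.array([[True, True], [False, False]])
#   calculate_iou(pred, ref)  # -> 0.333...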
def calculate_multi_object_miou_tf(pred_labels, ref_labels):
"""Calculates the mIoU for a batch of predicted and reference labels.
Args:
pred_labels: Int32 tensor of shape [batch, height, width, 1].
ref_labels: Int32 tensor of shape [batch, height, width, 1].
Returns:
The mIoU between pred_labels and ref_labels as float32 scalar tensor.
"""
def calculate_multi_object_miou(pred_labels_, ref_labels_):
"""Calculates the mIoU for predicted and reference labels in numpy.
Args:
pred_labels_: int32 np.array of shape [batch, height, width, 1].
ref_labels_: int32 np.array of shape [batch, height, width, 1].
Returns:
The mIoU between pred_labels_ and ref_labels_.
"""
assert len(pred_labels_.shape) == 4
assert pred_labels_.shape[3] == 1
assert pred_labels_.shape == ref_labels_.shape
ious = []
for pred_label, ref_label in zip(pred_labels_, ref_labels_):
ids = np.setdiff1d(np.unique(ref_label), [0])
if ids.size == 0:
continue
for id_ in ids:
iou = calculate_iou(pred_label == id_, ref_label == id_)
ious.append(iou)
if ious:
return np.cast['float32'](np.mean(ious))
else:
return np.cast['float32'](1.0)
miou = tf.py_func(calculate_multi_object_miou, [pred_labels, ref_labels],
tf.float32, name='calculate_multi_object_miou')
miou.set_shape(())
return miou
def calculate_multi_object_ious(pred_labels, ref_labels, label_set):
"""Calculates the intersection over union for binary segmentation.
Args:
pred_labels: predicted segmentation labels.
ref_labels: reference segmentation labels.
label_set: int np.array of object ids.
Returns:
float np.array of IoUs between pred_labels and ref_labels
for each object in label_set.
"""
# Background should not be included as object label.
return np.array([calculate_iou(pred_labels == label, ref_labels == label)
for label in label_set if label != 0])
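# A minimal usage sketch with toy values (the helper below is ours, added
# only for illustration): per-object IoUs for a two-object label map,
# skipping the background id 0.
def _demo_multi_object_ious():
  pred = np.array([[1, 1], [2, 0]])
  ref = np.array([[1, 0], [2, 2]])
  # Object 1: intersection 1, union 2 -> 0.5; object 2: likewise 0.5.
  return calculate_multi_object_ious(pred, ref, np.array([0, 1, 2]))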
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for artificially damaging segmentation masks."""
import numpy as np
from scipy.ndimage import interpolation
from skimage import morphology
from skimage import transform
import tensorflow as tf
def damage_masks(labels, shift=True, scale=True, rotate=True, dilate=True):
"""Damages segmentation masks by random transformations.
Args:
labels: Int32 labels tensor of shape (height, width, 1).
shift: Boolean, whether to damage the masks by shifting.
scale: Boolean, whether to damage the masks by scaling.
rotate: Boolean, whether to damage the masks by rotation.
dilate: Boolean, whether to damage the masks by dilation.
Returns:
The damaged version of labels.
"""
def _damage_masks_np(labels_):
return damage_masks_np(labels_, shift, scale, rotate, dilate)
damaged_masks = tf.py_func(_damage_masks_np, [labels], tf.int32,
name='damage_masks')
damaged_masks.set_shape(labels.get_shape())
return damaged_masks
def damage_masks_np(labels, shift=True, scale=True, rotate=True, dilate=True):
"""Performs the actual mask damaging in numpy.
Args:
labels: Int32 numpy array of shape (height, width, 1).
shift: Boolean, whether to damage the masks by shifting.
scale: Boolean, whether to damage the masks by scaling.
rotate: Boolean, whether to damage the masks by rotation.
dilate: Boolean, whether to damage the masks by dilation.
Returns:
The damaged version of labels.
"""
unique_labels = np.unique(labels)
unique_labels = np.setdiff1d(unique_labels, [0])
# Shuffle to get random depth ordering when combining together.
np.random.shuffle(unique_labels)
damaged_labels = np.zeros_like(labels)
for l in unique_labels:
obj_mask = (labels == l)
damaged_obj_mask = _damage_single_object_mask(obj_mask, shift, scale,
rotate, dilate)
damaged_labels[damaged_obj_mask] = l
return damaged_labels
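# A minimal usage sketch with toy values (the helper is ours, for
# illustration): damage a two-object label map; the result keeps the same
# shape and the same set of object ids.
def _demo_damage_masks_np():
  labels = np.zeros((64, 64, 1), dtype=np.int32)
  labels[10:30, 10:30] = 1  # First object: a square.
  labels[40:60, 40:60] = 2  # Second object: another square.
  damaged = damage_masks_np(labels)
  assert damaged.shape == labels.shape
  return damaged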
def _damage_single_object_mask(mask, shift, scale, rotate, dilate):
"""Performs mask damaging in numpy for a single object.
Args:
mask: Boolean numpy array of shape(height, width, 1).
shift: Boolean, whether to damage the masks by shifting.
scale: Boolean, whether to damage the masks by scaling.
rotate: Boolean, whether to damage the masks by rotation.
dilate: Boolean, whether to damage the masks by dilation.
Returns:
The damaged version of mask.
"""
# For now we just do shifting, scaling, rotation and dilation. Affine or
# thin plate spline transformations would be better.
if shift:
mask = _shift_mask(mask)
if scale:
mask = _scale_mask(mask)
if rotate:
mask = _rotate_mask(mask)
if dilate:
mask = _dilate_mask(mask)
return mask
def _shift_mask(mask, max_shift_factor=0.05):
"""Damages a mask for a single object by randomly shifting it in numpy.
Args:
mask: Boolean numpy array of shape(height, width, 1).
max_shift_factor: Float scalar, the maximum factor for random shifting.
Returns:
The shifted version of mask.
"""
nzy, nzx, _ = mask.nonzero()
h = nzy.max() - nzy.min()
w = nzx.max() - nzx.min()
size = np.sqrt(h * w)
offset = np.random.uniform(-size * max_shift_factor, size * max_shift_factor,
2)
shifted_mask = interpolation.shift(np.squeeze(mask, axis=2),
offset, order=0).astype('bool')[...,
np.newaxis]
return shifted_mask
def _scale_mask(mask, scale_amount=0.025):
"""Damages a mask for a single object by randomly scaling it in numpy.
Args:
mask: Boolean numpy array of shape(height, width, 1).
scale_amount: Float scalar, the maximum factor for random scaling.
Returns:
The scaled version of mask.
"""
nzy, nzx, _ = mask.nonzero()
cy = 0.5 * (nzy.max() - nzy.min())
cx = 0.5 * (nzx.max() - nzx.min())
scale_factor = np.random.uniform(1.0 - scale_amount, 1.0 + scale_amount)
shift = transform.SimilarityTransform(translation=[-cx, -cy])
inv_shift = transform.SimilarityTransform(translation=[cx, cy])
s = transform.SimilarityTransform(scale=[scale_factor, scale_factor])
m = (shift + (s + inv_shift)).inverse
scaled_mask = transform.warp(mask, m) > 0.5
return scaled_mask
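# Note on the composition above: skimage applies the left transform first, so
# shift + (s + inv_shift) moves the reference point to the origin, scales,
# and moves it back; warp() expects the inverse mapping, which is why
# .inverse is passed. The same pattern is reused for rotation below.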
def _rotate_mask(mask, max_rot_degrees=3.0):
"""Damages a mask for a single object by randomly rotating it in numpy.
Args:
mask: Boolean numpy array of shape(height, width, 1).
max_rot_degrees: Float scalar, the maximum number of degrees to rotate.
Returns:
The rotated version of mask.
"""
cy = 0.5 * mask.shape[0]
cx = 0.5 * mask.shape[1]
rot_degrees = np.random.uniform(-max_rot_degrees, max_rot_degrees)
shift = transform.SimilarityTransform(translation=[-cx, -cy])
inv_shift = transform.SimilarityTransform(translation=[cx, cy])
r = transform.SimilarityTransform(rotation=np.deg2rad(rot_degrees))
m = (shift + (r + inv_shift)).inverse
rotated_mask = transform.warp(mask, m) > 0.5
return rotated_mask
def _dilate_mask(mask, dilation_radius=5):
"""Damages a mask for a single object by dilating it in numpy.
Args:
mask: Boolean numpy array of shape(height, width, 1).
dilation_radius: Integer, the radius of the used disk structure element.
Returns:
The dilated version of mask.
"""
disk = morphology.disk(dilation_radius, dtype=bool)
dilated_mask = morphology.binary_dilation(
np.squeeze(mask, axis=2), selem=disk)[..., np.newaxis]
return dilated_mask
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for training."""
import collections
import six
import tensorflow as tf
from deeplab.core import preprocess_utils
from deeplab.utils import train_utils
from feelvos.utils import embedding_utils
from feelvos.utils import eval_utils
slim = tf.contrib.slim
add_softmax_cross_entropy_loss_for_each_scale = (
train_utils.add_softmax_cross_entropy_loss_for_each_scale)
get_model_gradient_multipliers = train_utils.get_model_gradient_multipliers
get_model_learning_rate = train_utils.get_model_learning_rate
resolve_shape = preprocess_utils.resolve_shape
def add_triplet_loss_for_each_scale(batch_size, num_frames_per_video,
embedding_dim, scales_to_embeddings,
labels, scope):
"""Adds triplet loss for logits of each scale.
Args:
batch_size: Int, the number of video chunks sampled per batch.
num_frames_per_video: Int, the number of frames per video.
embedding_dim: Int, the dimension of the learned embedding.
scales_to_embeddings: A map from embedding names for different scales to
embeddings. The embeddings have shape [batch, embeddings_height,
embeddings_width, embedding_dim].
labels: Groundtruth labels with shape [batch, image_height, image_width, 1].
scope: String, the scope for the loss.
Raises:
ValueError: labels is None.
"""
if labels is None:
raise ValueError('No label for triplet loss.')
for scale, embeddings in six.iteritems(scales_to_embeddings):
loss_scope = None
if scope:
loss_scope = '%s_%s' % (scope, scale)
# Label is downsampled to the same size as logits.
scaled_labels = tf.image.resize_nearest_neighbor(
labels,
resolve_shape(embeddings, 4)[1:3],
align_corners=True)
# Reshape from [batch * num_frames, ...] to [batch, num_frames, ...].
h = tf.shape(embeddings)[1]
w = tf.shape(embeddings)[2]
new_labels_shape = tf.stack([batch_size, num_frames_per_video, h, w, 1])
reshaped_labels = tf.reshape(scaled_labels, new_labels_shape)
new_embeddings_shape = tf.stack([batch_size, num_frames_per_video, h, w,
-1])
reshaped_embeddings = tf.reshape(embeddings, new_embeddings_shape)
with tf.name_scope(loss_scope):
total_loss = tf.constant(0, dtype=tf.float32)
for n in range(batch_size):
embedding = reshaped_embeddings[n]
label = reshaped_labels[n]
n_pixels = h * w
n_anchors_used = 256
sampled_anchor_indices = tf.random_shuffle(tf.range(n_pixels))[
:n_anchors_used]
anchors_pool = tf.reshape(embedding[0], [-1, embedding_dim])
anchors_pool_classes = tf.reshape(label[0], [-1])
anchors = tf.gather(anchors_pool, sampled_anchor_indices)
anchor_classes = tf.gather(anchors_pool_classes, sampled_anchor_indices)
pos_neg_pool = tf.reshape(embedding[1:], [-1, embedding_dim])
pos_neg_pool_classes = tf.reshape(label[1:], [-1])
dists = embedding_utils.pairwise_distances(anchors, pos_neg_pool)
pos_mask = tf.equal(anchor_classes[:, tf.newaxis],
pos_neg_pool_classes[tf.newaxis, :])
neg_mask = tf.logical_not(pos_mask)
pos_mask_f = tf.cast(pos_mask, tf.float32)
neg_mask_f = tf.cast(neg_mask, tf.float32)
pos_dists = pos_mask_f * dists + 1e20 * neg_mask_f
neg_dists = neg_mask_f * dists + 1e20 * pos_mask_f
pos_dists_min = tf.reduce_min(pos_dists, axis=1)
neg_dists_min = tf.reduce_min(neg_dists, axis=1)
margin = 1.0
loss = tf.nn.relu(pos_dists_min - neg_dists_min + margin)
# Handle case that no positive is present (per anchor).
any_pos = tf.reduce_any(pos_mask, axis=1)
loss *= tf.cast(any_pos, tf.float32)
# Average over anchors
loss = tf.reduce_mean(loss, axis=0)
total_loss += loss
total_loss /= batch_size
# Scale the loss up a bit.
total_loss *= 3.0
tf.add_to_collection(tf.GraphKeys.LOSSES, total_loss)
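# A NumPy sketch of the per-anchor hinge above (an illustrative restatement,
# not the training path): for each anchor, take the hardest (closest)
# positive and hardest negative over the pool and hinge their gap at the
# margin, zeroing out anchors that have no positive.
def _triplet_loss_np(dists, pos_mask, margin=1.0):
  """dists: [num_anchors, pool] squared distances; pos_mask: bool, same shape."""
  import numpy as np  # Local import; numpy is not otherwise used here.
  pos_dists = np.where(pos_mask, dists, 1e20).min(axis=1)
  neg_dists = np.where(~pos_mask, dists, 1e20).min(axis=1)
  loss = np.maximum(pos_dists - neg_dists + margin, 0.0)
  loss *= pos_mask.any(axis=1)  # Drop anchors without any positive.
  return loss.mean()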
def add_dynamic_softmax_cross_entropy_loss_for_each_scale(
scales_to_logits, labels, ignore_label, loss_weight=1.0,
upsample_logits=True, scope=None, top_k_percent_pixels=1.0,
hard_example_mining_step=100000):
"""Adds softmax cross entropy loss per scale for logits with varying classes.
Also adds summaries for mIoU.
Args:
scales_to_logits: A map from logits names for different scales to logits.
The logits are a list of length batch_size of tensors of shape
[time, logits_height, logits_width, num_classes].
labels: Groundtruth labels with shape [batch_size * time, image_height,
image_width, 1].
ignore_label: Integer, label to ignore.
loss_weight: Float, loss weight.
upsample_logits: Boolean, upsample logits or not.
scope: String, the scope for the loss.
top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its
value < 1.0, only compute the loss for the top k percent pixels (e.g.,
the top 20% pixels). This is useful for hard pixel mining.
hard_example_mining_step: An integer, the training step in which the
# hard example mining kicks off. Note that we gradually reduce the
mining percent to the top_k_percent_pixels. For example, if
hard_example_mining_step=100K and top_k_percent_pixels=0.25, then
mining percent will gradually reduce from 100% to 25% until 100K steps
after which we only mine top 25% pixels.
Raises:
ValueError: Label or logits is None.
"""
if labels is None:
raise ValueError('No label for softmax cross entropy loss.')
if top_k_percent_pixels < 0 or top_k_percent_pixels > 1:
raise ValueError('Unexpected value of top_k_percent_pixels.')
for scale, logits in six.iteritems(scales_to_logits):
loss_scope = None
if scope:
loss_scope = '%s_%s' % (scope, scale)
if upsample_logits:
# Label is not downsampled, and instead we upsample logits.
assert isinstance(logits, collections.Sequence)
logits = [tf.image.resize_bilinear(
x,
preprocess_utils.resolve_shape(labels, 4)[1:3],
align_corners=True) for x in logits]
scaled_labels = labels
else:
# Label is downsampled to the same size as logits.
assert isinstance(logits, collections.Sequence)
scaled_labels = tf.image.resize_nearest_neighbor(
labels,
preprocess_utils.resolve_shape(logits[0], 4)[1:3],
align_corners=True)
batch_size = len(logits)
num_time = preprocess_utils.resolve_shape(logits[0])[0]
reshaped_labels = tf.reshape(
scaled_labels, ([batch_size, num_time] +
preprocess_utils.resolve_shape(scaled_labels)[1:]))
for n, logits_n in enumerate(logits):
labels_n = reshaped_labels[n]
labels_n = tf.reshape(labels_n, shape=[-1])
not_ignore_mask = tf.to_float(tf.not_equal(labels_n,
ignore_label)) * loss_weight
num_classes_n = tf.shape(logits_n)[-1]
one_hot_labels = slim.one_hot_encoding(
labels_n, num_classes_n, on_value=1.0, off_value=0.0)
logits_n_flat = tf.reshape(logits_n, shape=[-1, num_classes_n])
if top_k_percent_pixels == 1.0:
tf.losses.softmax_cross_entropy(
one_hot_labels,
logits_n_flat,
weights=not_ignore_mask,
scope=loss_scope)
else:
# Only compute the loss for top k percent pixels.
# First, compute the loss for all pixels. Note we do not put the loss
# to loss_collection and set reduction = None to keep the shape.
num_pixels = tf.to_float(tf.shape(logits_n_flat)[0])
pixel_losses = tf.losses.softmax_cross_entropy(
one_hot_labels,
logits_n_flat,
weights=not_ignore_mask,
scope='pixel_losses',
loss_collection=None,
reduction=tf.losses.Reduction.NONE)
# Compute the top_k_percent pixels based on current training step.
if hard_example_mining_step == 0:
# Directly focus on the top_k pixels.
top_k_pixels = tf.to_int32(top_k_percent_pixels * num_pixels)
else:
# Gradually reduce the mining percent to top_k_percent_pixels.
global_step = tf.to_float(tf.train.get_or_create_global_step())
ratio = tf.minimum(1.0, global_step / hard_example_mining_step)
top_k_pixels = tf.to_int32(
(ratio * top_k_percent_pixels + (1.0 - ratio)) * num_pixels)
_, top_k_indices = tf.nn.top_k(pixel_losses,
k=top_k_pixels,
sorted=True,
name='top_k_percent_pixels')
# Compute the loss for the top k percent pixels.
tf.losses.softmax_cross_entropy(
tf.gather(one_hot_labels, top_k_indices),
tf.gather(logits_n_flat, top_k_indices),
weights=tf.gather(not_ignore_mask, top_k_indices),
scope=loss_scope)
pred_n = tf.argmax(logits_n, axis=-1, output_type=tf.int32)[
..., tf.newaxis]
labels_n = labels[n * num_time: (n + 1) * num_time]
miou = eval_utils.calculate_multi_object_miou_tf(pred_n, labels_n)
tf.summary.scalar('miou', miou)
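# The mining schedule above linearly anneals the kept-pixel fraction from
# 100% down to top_k_percent_pixels over hard_example_mining_step steps; a
# plain-Python restatement of that arithmetic (the helper is illustrative):
def _mining_fraction(step, hard_example_mining_step, top_k_percent_pixels):
  """Fraction of pixels whose loss is kept at a given global step."""
  if hard_example_mining_step == 0:
    return top_k_percent_pixels  # Mine the top k fraction from step 0.
  ratio = min(1.0, float(step) / hard_example_mining_step)
  return ratio * top_k_percent_pixels + (1.0 - ratio)
# E.g. _mining_fraction(50000, 100000, 0.25) == 0.625: halfway through the
# schedule the loss keeps the hardest 62.5% of pixels.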
def get_model_init_fn(train_logdir,
tf_initial_checkpoint,
initialize_last_layer,
last_layers,
ignore_missing_vars=False):
"""Gets the function initializing model variables from a checkpoint.
Args:
train_logdir: Log directory for training.
tf_initial_checkpoint: TensorFlow checkpoint for initialization.
initialize_last_layer: Initialize last layer or not.
last_layers: Last layers of the model.
ignore_missing_vars: Ignore missing variables in the checkpoint.
Returns:
Initialization function.
"""
if tf_initial_checkpoint is None:
tf.logging.info('Not initializing the model from a checkpoint.')
return None
if tf.train.latest_checkpoint(train_logdir):
tf.logging.info('Ignoring initialization; other checkpoint exists')
return None
tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint)
# Variables that will not be restored.
exclude_list = ['global_step']
if not initialize_last_layer:
exclude_list.extend(last_layers)
variables_to_restore = slim.get_variables_to_restore(exclude=exclude_list)
if variables_to_restore:
return slim.assign_from_checkpoint_fn(
tf_initial_checkpoint,
variables_to_restore,
ignore_missing_vars=ignore_missing_vars)
return None
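# Typical call site (flag names and values are illustrative): build the
# init_fn once and hand it to the slim training loop, e.g.
#   init_fn = get_model_init_fn(FLAGS.train_logdir,
#                               FLAGS.tf_initial_checkpoint,
#                               FLAGS.initialize_last_layer,
#                               last_layers,
#                               ignore_missing_vars=True)
#   slim.learning.train(train_op, logdir=FLAGS.train_logdir, init_fn=init_fn)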