Unverified Commit 09d9656f authored by Srihari Humbarwadi's avatar Srihari Humbarwadi Committed by GitHub

Merge branch 'panoptic-segmentation' into panoptic-deeplab-modeling

parents ac671306 49a5706c
......@@ -15,7 +15,7 @@
"""Preprocessing ops."""
import math
from typing import Optional
from typing import Optional, Tuple, Union
from six.moves import range
import tensorflow as tf
......@@ -303,6 +303,86 @@ def resize_and_crop_image_v2(image,
return output_image, image_info
def resize_image(
image: tf.Tensor,
size: Union[Tuple[int, int], int],
max_size: Optional[int] = None,
method: tf.image.ResizeMethod = tf.image.ResizeMethod.BILINEAR):
"""Resize image with size and max_size.
Args:
image: the image to be resized.
    size: if a list or tuple, resize the image to that size. If a scalar,
      keep the aspect ratio and resize the short side to this value.
    max_size: only used when `size` is a scalar. If resizing the short side to
      `size` would make the long side exceed `max_size`, the long side is
      capped at `max_size` instead, still preserving the aspect ratio.
method: the method argument passed to tf.image.resize.
Returns:
the resized image and image_info to be used for downstream processing.
image_info: a 2D `Tensor` that encodes the information of the image and the
applied preprocessing. It is in the format of
[[original_height, original_width], [resized_height, resized_width],
[y_scale, x_scale], [0, 0]], where [resized_height, resized_width]
is the actual scaled image size, and [y_scale, x_scale] is the
scaling factor, which is the ratio of
scaled dimension / original dimension.
"""
def get_size_with_aspect_ratio(image_size, size, max_size=None):
h = image_size[0]
w = image_size[1]
if max_size is not None:
min_original_size = tf.cast(tf.math.minimum(w, h), dtype=tf.float32)
max_original_size = tf.cast(tf.math.maximum(w, h), dtype=tf.float32)
if max_original_size / min_original_size * size > max_size:
size = tf.cast(
tf.math.floor(max_size * min_original_size / max_original_size),
dtype=tf.int32)
else:
size = tf.cast(size, tf.int32)
else:
size = tf.cast(size, tf.int32)
if (w <= h and w == size) or (h <= w and h == size):
return tf.stack([h, w])
if w < h:
ow = size
oh = tf.cast(
(tf.cast(size, dtype=tf.float32) * tf.cast(h, dtype=tf.float32) /
tf.cast(w, dtype=tf.float32)),
dtype=tf.int32)
else:
oh = size
ow = tf.cast(
(tf.cast(size, dtype=tf.float32) * tf.cast(w, dtype=tf.float32) /
tf.cast(h, dtype=tf.float32)),
dtype=tf.int32)
return tf.stack([oh, ow])
def get_size(image_size, size, max_size=None):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
  original_size = tf.shape(image)[0:2]
  size = get_size(original_size, size, max_size)
  rescaled_image = tf.image.resize(
      image, tf.cast(size, tf.int32), method=method)
  image_scale = size / original_size
  image_info = tf.stack([
      tf.cast(original_size, dtype=tf.float32),
      tf.cast(size, dtype=tf.float32),
      tf.cast(image_scale, tf.float32),
      tf.constant([0.0, 0.0], dtype=tf.float32)
  ])
return rescaled_image, image_info
def center_crop_image(image):
"""Center crop a square shape slice from the input image.
......
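To make the `size`/`max_size` semantics of the new `resize_image` op concrete, here is a minimal usage sketch (assuming the module lives at `official.vision.beta.ops.preprocess_ops`, as in the unit test below):

```python
import tensorflow as tf

from official.vision.beta.ops import preprocess_ops

image = tf.zeros([1280, 640, 3])  # (height, width, channels)

# Scalar size: the short side is resized to 320 and the aspect ratio is kept,
# so the 1280x640 image becomes 640x320.
resized, image_info = preprocess_ops.resize_image(image, size=320)

# With max_size, the long side is capped: resizing the short side to 320 would
# make the long side 640 > 480, so the image is resized to 480x240 instead.
resized, image_info = preprocess_ops.resize_image(image, size=320, max_size=480)

# image_info stacks [original size, resized size, scale, offset].
print(image_info)
```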
......@@ -225,6 +225,22 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
np.random.randint(low=0, high=num_boxes, size=(num_boxes,)), tf.int64)
_ = preprocess_ops.random_crop(image, boxes, labels)
@parameterized.parameters(
((640, 640, 3), (1000, 1000), None, (1000, 1000, 3)),
((1280, 640, 3), 320, None, (640, 320, 3)),
((640, 1280, 3), 320, None, (320, 640, 3)),
((640, 640, 3), 320, 100, (100, 100, 3)))
def test_resize_image(self, input_shape, size, max_size, expected_shape):
resized_img, image_info = preprocess_ops.resize_image(
tf.zeros((input_shape)), size, max_size)
self.assertAllEqual(tf.shape(resized_img), expected_shape)
self.assertAllEqual(image_info[0], input_shape[:-1])
self.assertAllEqual(image_info[1], expected_shape[:-1])
self.assertAllEqual(
image_info[2],
np.array(expected_shape[:-1]) / np.array(input_shape[:-1]))
self.assertAllEqual(image_info[3], [0, 0])
if __name__ == '__main__':
tf.test.main()
# AssembleNet and AssembleNet++
This repository contains the official implementations of the following papers.
[![Paper](http://img.shields.io/badge/Paper-arXiv.1905.13209-B3181B?logo=arXiv)](https://arxiv.org/abs/1905.13209)
[AssembleNet: Searching for Multi-Stream Neural Connectivity in Video
Architectures](https://arxiv.org/abs/1905.13209)
[![Paper](http://img.shields.io/badge/Paper-arXiv.2008.08072-B3181B?logo=arXiv)](https://arxiv.org/abs/2008.08072)
[AssembleNet++: Assembling Modality Representations via Attention
Connections](https://arxiv.org/abs/2008.08072)
**DISCLAIMER**: AssembleNet++ implementation is still under development.
No support will be provided during the development phase.
......@@ -48,7 +48,6 @@ HOURGLASS_SPECS = {
}
@tf.keras.utils.register_keras_serializable(package='centernet')
class Hourglass(tf.keras.Model):
"""CenterNet Hourglass backbone."""
......
......@@ -21,7 +21,6 @@ import tensorflow as tf
from official.vision.beta.projects.centernet.modeling.layers import cn_nn_blocks
@tf.keras.utils.register_keras_serializable(package='centernet')
class CenterNetHead(tf.keras.Model):
"""CenterNet Head."""
......
......@@ -123,7 +123,6 @@ def _make_repeated_residual_blocks(
return tf.keras.Sequential(blocks)
@tf.keras.utils.register_keras_serializable(package='centernet')
class HourglassBlock(tf.keras.layers.Layer):
"""Hourglass module: an encoder-decoder block."""
......@@ -274,7 +273,6 @@ class HourglassBlock(tf.keras.layers.Layer):
return config
@tf.keras.utils.register_keras_serializable(package='centernet')
class CenterNetHeadConv(tf.keras.layers.Layer):
"""Convolution block for the CenterNet head."""
......
......@@ -30,7 +30,6 @@ from official.vision.beta.projects.centernet.ops import loss_ops
from official.vision.beta.projects.centernet.ops import nms_ops
@tf.keras.utils.register_keras_serializable(package='centernet')
class CenterNetDetectionGenerator(tf.keras.layers.Layer):
"""CenterNet Detection Generator."""
......
......@@ -23,7 +23,6 @@ from official.modeling import tf_utils
from official.vision.beta.projects.deepmac_maskrcnn.modeling.heads import hourglass_network
@tf.keras.utils.register_keras_serializable(package='Vision')
class DeepMaskHead(tf.keras.layers.Layer):
"""Creates a mask head."""
......
......@@ -31,7 +31,6 @@ def resize_as(source, size):
return tf.transpose(source, (0, 3, 1, 2))
@tf.keras.utils.register_keras_serializable(package='Vision')
class DeepMaskRCNNModel(maskrcnn_model.MaskRCNNModel):
"""The Mask R-CNN model."""
......
......@@ -25,7 +25,6 @@ import tensorflow as tf
from official.vision.beta.projects.example import example_config as example_cfg
@tf.keras.utils.register_keras_serializable(package='Vision')
class ExampleModel(tf.keras.Model):
"""A example model class.
......
# Mobile Video Networks (MoViNets)
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tensorflow/models/blob/master/official/vision/beta/projects/movinet/movinet_tutorial.ipynb)
[![TensorFlow Hub](https://img.shields.io/badge/TF%20Hub-Models-FF6F00?logo=tensorflow)](https://tfhub.dev/google/collections/movinet)
[![Paper](http://img.shields.io/badge/Paper-arXiv.2103.11511-B3181B?logo=arXiv)](https://arxiv.org/abs/2103.11511)
This repository is the official implementation of
[MoViNets: Mobile Video Networks for Efficient Video
Recognition](https://arxiv.org/abs/2103.11511).
**[UPDATE 2021-07-12] Mobile Models Available via [TF Lite](#tf-lite-streaming-models)**
<p align="center">
<img src="https://storage.googleapis.com/tf_model_garden/vision/movinet/artifacts/hoverboard_stream.gif" height=500>
</p>
## Description
Mobile Video Networks (MoViNets) are efficient video classification models
runnable on mobile devices. MoViNets demonstrate state-of-the-art accuracy and
efficiency on several large-scale video action recognition datasets.
On [Kinetics 600](https://deepmind.com/research/open-source/kinetics),
MoViNet-A6 achieves 84.8% top-1 accuracy, outperforming recent
Vision Transformer models like [ViViT](https://arxiv.org/abs/2103.15691) (83.0%)
and [VATT](https://arxiv.org/abs/2104.11178) (83.6%) without any additional
training data, while using 10x fewer FLOPs. Meanwhile, streaming MoViNet-A0
achieves 72% accuracy while using 3x fewer FLOPs than MobileNetV3-large (68%).
There is a large gap between accurate video models and efficient video models
for action recognition. On the one hand, 2D MobileNet CNNs are fast and can
operate on streaming video in real time, but they tend to be noisy and
inaccurate. On the other hand, 3D CNNs are accurate, but they are memory- and
computation-intensive and cannot operate on streaming video.
MoViNets bridge this gap, producing:
- State-of-the-art efficiency and accuracy across the model family (MoViNet-A0
  to A6).
- Streaming models with 3D causal convolutions substantially reducing memory
usage.
- Temporal ensembles of models to boost efficiency even higher.
MoViNets also improve computational efficiency by outputting high-quality
predictions frame by frame, as opposed to the traditional multi-clip evaluation
approach that performs redundant computation and limits temporal scope.
<p align="center">
<img src="https://storage.googleapis.com/tf_model_garden/vision/movinet/artifacts/movinet_multi_clip_eval.png" height=200>
</p>
<p align="center">
<img src="https://storage.googleapis.com/tf_model_garden/vision/movinet/artifacts/movinet_stream_eval.png" height=200>
</p>
## History
- **2021-07-12** Add TF Lite support and replace 3D stream models with
mobile-friendly (2+1)D stream.
- **2021-05-30** Add streaming MoViNet checkpoints and examples.
- **2021-05-11** Initial Commit.
## Authors and Maintainers
* Dan Kondratyuk ([@hyperparticle](https://github.com/hyperparticle))
* Liangzhe Yuan ([@yuanliangzhe](https://github.com/yuanliangzhe))
* Yeqing Li ([@yeqingli](https://github.com/yeqingli))
## Table of Contents
- [Requirements](#requirements)
- [Results and Pretrained Weights](#results-and-pretrained-weights)
- [Kinetics 600](#kinetics-600)
- [Prediction Examples](#prediction-examples)
- [TF Lite Example](#tf-lite-example)
- [Training and Evaluation](#training-and-evaluation)
- [References](#references)
- [License](#license)
- [Citation](#citation)
## Requirements
[![TensorFlow 2.4](https://img.shields.io/badge/TensorFlow-2.4-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v2.4.0)
[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB?logo=python)](https://www.python.org/downloads/release/python-360/)
To install requirements:
```shell
pip install -r requirements.txt
```
## Results and Pretrained Weights
[![TensorFlow Hub](https://img.shields.io/badge/TF%20Hub-Models-FF6F00?logo=tensorflow)](https://tfhub.dev/google/collections/movinet)
[![TensorBoard](https://img.shields.io/badge/TensorBoard-dev-FF6F00?logo=tensorflow)](https://tensorboard.dev/experiment/Q07RQUlVRWOY4yDw3SnSkA/)
### Kinetics 600
<p align="center">
<img src="https://storage.googleapis.com/tf_model_garden/vision/movinet/artifacts/movinet_comparison.png" height=500>
</p>
[tensorboard.dev summary](https://tensorboard.dev/experiment/Q07RQUlVRWOY4yDw3SnSkA/)
of training runs across all models.
The table below summarizes the performance of each model on
[Kinetics 600](https://deepmind.com/research/open-source/kinetics)
and provides links to download pretrained models. All models are evaluated on
single clips with the same resolution as training.
Note: MoViNet-A6 can be constructed as an ensemble of MoViNet-A4 and
MoViNet-A5.
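The tables below list A0 through A5 checkpoints only; a minimal sketch of how such an A6 ensemble could be formed (the variable names and the simple probability averaging are illustrative assumptions, not an official recipe):

```python
import tensorflow as tf

# Assumes `model_a4` and `model_a5` are MovinetClassifier instances restored
# from the A4 and A5 checkpoints (see the prediction examples below), and that
# `video_a4` / `video_a5` hold the same clip preprocessed to each model's input
# shape from the table above.
logits_a4 = model_a4(video_a4)
logits_a5 = model_a5(video_a5)

# Average the per-class probabilities of the two models to obtain the
# ensembled prediction.
probs = (tf.nn.softmax(logits_a4) + tf.nn.softmax(logits_a5)) / 2.0
prediction = tf.argmax(probs, -1)
```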
#### Base Models
Base models implement standard 3D convolutions without stream buffers. They
are not recommended for fast inference on CPU or mobile due to limited support
for [`tf.nn.conv3d`](https://www.tensorflow.org/api_docs/python/tf/nn/conv3d);
see the [streaming models section](#streaming-models) instead.
| Model Name | Top-1 Accuracy | Top-5 Accuracy | Input Shape | GFLOPs\* | Checkpoint | TF Hub SavedModel |
|------------|----------------|----------------|-------------|----------|------------|-------------------|
| MoViNet-A0-Base | 72.28 | 90.92 | 50 x 172 x 172 | 2.7 | [checkpoint (12 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a0_base.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a0/base/kinetics-600/classification/) |
| MoViNet-A1-Base | 76.69 | 93.40 | 50 x 172 x 172 | 6.0 | [checkpoint (18 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a1_base.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a1/base/kinetics-600/classification/) |
| MoViNet-A2-Base | 78.62 | 94.17 | 50 x 224 x 224 | 10 | [checkpoint (20 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a2_base.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a2/base/kinetics-600/classification/) |
| MoViNet-A3-Base | 81.79 | 95.67 | 120 x 256 x 256 | 57 | [checkpoint (29 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a3_base.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a3/base/kinetics-600/classification/) |
| MoViNet-A4-Base | 83.48 | 96.16 | 80 x 290 x 290 | 110 | [checkpoint (44 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a4_base.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a4/base/kinetics-600/classification/) |
| MoViNet-A5-Base | 84.27 | 96.39 | 120 x 320 x 320 | 280 | [checkpoint (72 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a5_base.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a5/base/kinetics-600/classification/) |
\*GFLOPs per video on Kinetics 600.
#### Streaming Models
Streaming models implement causal (2+1)D convolutions with stream buffers.
They use (2+1)D convolutions instead of 3D to take advantage of optimized
[`tf.nn.conv2d`](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d)
operations, which offer fast inference on CPU. Like base models, streaming
models can be run on individual frames or on larger video clips.
Note: A3, A4, and A5 models use a positional encoding in the squeeze-excitation
blocks, while A0, A1, and A2 do not. For the smaller models, omitting the
positional encoding does not affect accuracy, while for the larger models
accuracy degrades significantly without it.
| Model Name | Top-1 Accuracy | Top-5 Accuracy | Input Shape\* | GFLOPs\*\* | Checkpoint | TF Hub SavedModel |
|------------|----------------|----------------|---------------|------------|------------|-------------------|
| MoViNet-A0-Stream | 72.05 | 90.63 | 50 x 172 x 172 | 2.7 | [checkpoint (12 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a0_stream.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a0/stream/kinetics-600/classification/) |
| MoViNet-A1-Stream | 76.45 | 93.25 | 50 x 172 x 172 | 6.0 | [checkpoint (18 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a1_stream.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a1/stream/kinetics-600/classification/) |
| MoViNet-A2-Stream | 78.40 | 94.05 | 50 x 224 x 224 | 10 | [checkpoint (20 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a2_stream.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a2/stream/kinetics-600/classification/) |
| MoViNet-A3-Stream | 80.09 | 94.84 | 120 x 256 x 256 | 57 | [checkpoint (29 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a3_stream.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a3/stream/kinetics-600/classification/) |
| MoViNet-A4-Stream | 81.49 | 95.66 | 80 x 290 x 290 | 110 | [checkpoint (44 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a4_stream.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a4/stream/kinetics-600/classification/) |
| MoViNet-A5-Stream | 82.37 | 95.79 | 120 x 320 x 320 | 280 | [checkpoint (72 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a5_stream.tar.gz) | [tfhub](https://tfhub.dev/tensorflow/movinet/a5/stream/kinetics-600/classification/) |
\*In streaming mode, the number of frames corresponds to the total accumulated
duration of the 10-second clip.
\*\*GFLOPs per video on Kinetics 600.
Note: current streaming model checkpoints have been updated with a slightly
different architecture. To download the old checkpoints, insert `_legacy` before
`.tar.gz` in the URL. E.g., `movinet_a0_stream_legacy.tar.gz`.
##### TF Lite Streaming Models
For convenience, we provide converted TF Lite models for inference on mobile
devices. See the [TF Lite Example](#tf-lite-example) to export and run your own
models.
For reference, MoViNet-A0-Stream runs with a similar latency to
[MobileNetV3-Large](https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/classification/)
with +5% accuracy on Kinetics 600.
| Model Name | Input Shape | Pixel 4 Latency\* | x86 Latency\* | TF Lite Binary |
|------------|-------------|-------------------|---------------|----------------|
| MoViNet-A0-Stream | 1 x 1 x 172 x 172 | 22 ms | 16 ms | [TF Lite (13 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a0_stream.tflite) |
| MoViNet-A1-Stream | 1 x 1 x 172 x 172 | 42 ms | 33 ms | [TF Lite (45 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a1_stream.tflite) |
| MoViNet-A2-Stream | 1 x 1 x 224 x 224 | 200 ms | 66 ms | [TF Lite (53 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a2_stream.tflite) |
| MoViNet-A3-Stream | 1 x 1 x 256 x 256 | - | 120 ms | [TF Lite (73 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a3_stream.tflite) |
| MoViNet-A4-Stream | 1 x 1 x 290 x 290 | - | 300 ms | [TF Lite (101 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a4_stream.tflite) |
| MoViNet-A5-Stream | 1 x 1 x 320 x 320 | - | 450 ms | [TF Lite (153 MB)](https://storage.googleapis.com/tf_model_garden/vision/movinet/movinet_a5_stream.tflite) |
\*Single-frame latency measured with unaltered float32 operations on a single
CPU core. Observed latency may differ depending on hardware configuration.
Measured on a stock Pixel 4 (Android 11) and an x86 Intel Xeon W-2135 CPU.
## Prediction Examples
Please check out our [Colab Notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/official/vision/beta/projects/movinet/movinet_tutorial.ipynb)
to get started with MoViNets.
This section provides examples of how to run prediction.
For **base models**, run the following:
```python
import tensorflow as tf
from official.vision.beta.projects.movinet.modeling import movinet
from official.vision.beta.projects.movinet.modeling import movinet_model
# Create backbone and model.
backbone = movinet.Movinet(
model_id='a0',
causal=True,
use_external_states=True,
)
model = movinet_model.MovinetClassifier(
backbone, num_classes=600, output_states=True)
# Create your example input here.
# Refer to the paper for recommended input shapes.
inputs = tf.ones([1, 8, 172, 172, 3])
# [Optional] Build the model and load a pretrained checkpoint
model.build(inputs.shape)
checkpoint_dir = '/path/to/checkpoint'
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint = tf.train.Checkpoint(model=model)
status = checkpoint.restore(checkpoint_path)
status.assert_existing_objects_matched()
# Run the model prediction.
output = model(inputs)
prediction = tf.argmax(output, -1)
```
For **streaming models**, run the following:
```python
import tensorflow as tf
from official.vision.beta.projects.movinet.modeling import movinet
from official.vision.beta.projects.movinet.modeling import movinet_model
model_id = 'a0'
use_positional_encoding = model_id in {'a3', 'a4', 'a5'}
# Create backbone and model.
backbone = movinet.Movinet(
model_id=model_id,
causal=True,
conv_type='2plus1d',
se_type='2plus3d',
activation='hard_swish',
gating_activation='hard_sigmoid',
use_positional_encoding=use_positional_encoding,
use_external_states=True,
)
model = movinet_model.MovinetClassifier(
backbone,
num_classes=600,
output_states=True)
# Create your example input here.
# Refer to the paper for recommended input shapes.
inputs = tf.ones([1, 8, 172, 172, 3])
# [Optional] Build the model and load a pretrained checkpoint.
model.build(inputs.shape)
checkpoint_dir = '/path/to/checkpoint'
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint = tf.train.Checkpoint(model=model)
status = checkpoint.restore(checkpoint_path)
status.assert_existing_objects_matched()
# Split the video into individual frames.
# Note: we can also split into larger clips as well (e.g., 8-frame clips).
# Running on larger clips will slightly reduce latency overhead, but
# will consume more memory.
frames = tf.split(inputs, inputs.shape[1], axis=1)
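# For example, to use 8-frame clips instead of single frames (hypothetical),
# replace the split above with:
# frames = tf.split(inputs, num_or_size_splits=inputs.shape[1] // 8, axis=1)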
# Initialize the dict of states. All state tensors are initially zeros.
init_states = model.init_states(tf.shape(inputs))
# Run the model prediction by looping over each frame.
states = init_states
predictions = []
for frame in frames:
output, states = model({**states, 'image': frame})
predictions.append(output)
# The video classification will simply be the last output of the model.
final_prediction = tf.argmax(predictions[-1], -1)
# Alternatively, we can run the network on the entire input video.
# The output should be effectively the same
# (though it may differ slightly due to floating point error).
non_streaming_output, _ = model({**init_states, 'image': inputs})
non_streaming_prediction = tf.argmax(non_streaming_output, -1)
```
## TF Lite Example
This section outlines an example of how to export a model to run on mobile
devices with [TF Lite](https://www.tensorflow.org/lite).
First, convert to [TF SavedModel](https://www.tensorflow.org/guide/saved_model)
by running `export_saved_model.py`. For example, for `MoViNet-A0-Stream`, run:
```shell
python3 export_saved_model.py \
--model_id=a0 \
--causal=True \
--conv_type=2plus1d \
--se_type=2plus3d \
--activation=hard_swish \
--gating_activation=hard_sigmoid \
--use_positional_encoding=False \
--num_classes=600 \
--batch_size=1 \
--num_frames=1 \
--image_size=172 \
--bundle_input_init_states_fn=False \
--checkpoint_path=/path/to/checkpoint \
--export_path=/tmp/movinet_a0_stream
```
Then the SavedModel can be converted to TF Lite using the [`TFLiteConverter`](https://www.tensorflow.org/lite/convert):
```python
saved_model_dir = '/tmp/movinet_a0_stream'
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
with open('/tmp/movinet_a0_stream.tflite', 'wb') as f:
f.write(tflite_model)
```
To run with TF Lite using [tf.lite.Interpreter](https://www.tensorflow.org/lite/guide/inference#load_and_run_a_model_in_python)
with the Python API:
```python
# Create the interpreter and signature runner
interpreter = tf.lite.Interpreter('/tmp/movinet_a0_stream.tflite')
runner = interpreter.get_signature_runner()
# Extract state names and create the initial (zero) states
def state_name(name: str) -> str:
return name[len('serving_default_'):-len(':0')]
init_states = {
state_name(x['name']): tf.zeros(x['shape'], dtype=x['dtype'])
for x in interpreter.get_input_details()
}
del init_states['image']
# Insert your video clip here
video = tf.ones([1, 8, 172, 172, 3])
clips = tf.split(video, video.shape[1], axis=1)
# To run on a video, pass in one frame at a time
states = init_states
for clip in clips:
# Input shape: [1, 1, 172, 172, 3]
outputs = runner(**states, image=clip)
logits = outputs.pop('logits')
states = outputs
```
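As in the Keras examples above, the clip-level prediction can then be read from the last logits returned by the loop:

```python
final_prediction = tf.argmax(logits, -1)
```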
Follow the [official guide](https://www.tensorflow.org/lite/guide) to run a
model with TF Lite on your mobile device.
## Training and Evaluation
Run the following command for continuous training and evaluation.
```shell
MODE=train_and_eval # Can also be 'train' if using a separate evaluator job
CONFIG_FILE=official/vision/beta/projects/movinet/configs/yaml/movinet_a0_k600_8x8.yaml
python3 official/vision/beta/projects/movinet/train.py \
--experiment=movinet_kinetics600 \
--mode=${MODE} \
--model_dir=/tmp/movinet_a0_base/ \
--config_file=${CONFIG_FILE}
```
Run the following command for evaluation.
```shell
MODE=eval # Can also be 'eval_continuous' for use during training
CONFIG_FILE=official/vision/beta/projects/movinet/configs/yaml/movinet_a0_k600_8x8.yaml
python3 official/vision/beta/projects/movinet/train.py \
--experiment=movinet_kinetics600 \
--mode=${MODE} \
--model_dir=/tmp/movinet_a0_base/ \
--config_file=${CONFIG_FILE}
```
## License
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
This project is licensed under the terms of the **Apache License 2.0**.
## Citation
If you want to cite this code in your research paper, please use the following
information.
```
@article{kondratyuk2021movinets,
title={MoViNets: Mobile Video Networks for Efficient Video Recognition},
  author={Dan Kondratyuk and Liangzhe Yuan and Yandong Li and Li Zhang and Matthew Brown and Boqing Gong},
journal={arXiv preprint arXiv:2103.11511},
year={2021}
}
```
The MoViNet project has moved to [official/projects/movinet](https://github.com/tensorflow/models/tree/master/official/projects/movinet).
\ No newline at end of file
......@@ -5,7 +5,7 @@
Panoptic Segmentation combines two distinct vision tasks: semantic
segmentation and instance segmentation. These tasks are unified such that each
pixel in the image is assigned the label of the class it belongs to, and also
the instance identifier of the object it a part of.
the instance identifier of the object it is a part of.
## Environment setup
The code can be run on multiple GPUs or TPUs with different distribution
......@@ -13,8 +13,85 @@ strategies. See the TensorFlow distributed training
[guide](https://www.tensorflow.org/guide/distributed_training) for an overview
of `tf.distribute`.
The code is compatible with TensorFlow 2.4+. See requirements.txt for all
prerequisites, and you can also install them using the following command. `pip
install -r ./official/requirements.txt`
The code is compatible with TensorFlow 2.6+. See requirements.txt for all
prerequisites.
**DISCLAIMER**: Panoptic MaskRCNN is still under active development. Stay tuned!
```bash
$ git clone https://github.com/tensorflow/models.git
$ cd models
$ pip3 install -r official/requirements.txt
$ export PYTHONPATH=$(pwd)
```
## Preparing Dataset
```bash
$ ./official/vision/beta/data/process_coco_panoptic.sh <path-to-data-directory>
```
## Launch Training
```bash
$ export MODEL_DIR="gs://<path-to-model-directory>"
$ export TPU_NAME="<tpu-name>"
$ export ANNOTATION_FILE="gs://<path-to-coco-annotation-json>"
$ export TRAIN_DATA="gs://<path-to-train-data>"
$ export EVAL_DATA="gs://<path-to-eval-data>"
$ export OVERRIDES="task.validation_data.input_path=${EVAL_DATA},\
task.train_data.input_path=${TRAIN_DATA},\
task.annotation_file=${ANNOTATION_FILE},\
runtime.distribution_strategy=tpu"
$ python3 train.py \
--experiment panoptic_fpn_coco \
--config_file configs/experiments/r50fpn_1x_coco.yaml \
--mode train \
--model_dir $MODEL_DIR \
--tpu $TPU_NAME \
--params_override=$OVERRIDES
```
## Launch Evaluation
```bash
$ export MODEL_DIR="gs://<path-to-model-directory>"
$ export NUM_GPUS="<number-of-gpus>"
$ export PRECISION="<floating-point-precision>"
$ export ANNOTATION_FILE="gs://<path-to-coco-annotation-json>"
$ export TRAIN_DATA="gs://<path-to-train-data>"
$ export EVAL_DATA="gs://<path-to-eval-data>"
$ export OVERRIDES="task.validation_data.input_path=${EVAL_DATA}, \
task.train_data.input_path=${TRAIN_DATA}, \
task.annotation_file=${ANNOTATION_FILE}, \
runtime.distribution_strategy=mirrored, \
runtime.mixed_precision_dtype=$PRECISION, \
runtime.num_gpus=$NUM_GPUS"
$ python3 train.py \
--experiment panoptic_fpn_coco \
--config_file configs/experiments/r50fpn_1x_coco.yaml \
--mode eval \
--model_dir $MODEL_DIR \
--params_override=$OVERRIDES
```
**Note**: The [PanopticSegmentationGenerator](https://github.com/tensorflow/models/blob/ac7f9e7f2d0508913947242bad3e23ef7cae5a43/official/vision/beta/projects/panoptic_maskrcnn/modeling/layers/panoptic_segmentation_generator.py#L22) layer uses dynamic shapes and hence generating panoptic masks is not supported on Cloud TPUs. Running evaluation on Cloud TPUs is not supported for the same reason. However, training is supported on both Cloud TPUs and GPUs.
## Pretrained Models
### Panoptic FPN
Backbone | Schedule | Experiment name | Box mAP | Mask mAP | Overall PQ | Things PQ | Stuff PQ | Checkpoints
:------------| :----------- | :---------------------------| ------- | ---------- | ---------- | --------- | -------- | ------------:
ResNet-50 | 1x | `panoptic_fpn_coco` | 38.19 | 34.25 | 39.14 | 45.42 | 29.65 | [ckpt](gs://tf_model_garden/vision/panoptic/panoptic_fpn/panoptic_fpn_1x)
ResNet-50 | 3x | `panoptic_fpn_coco` | 40.64 | 36.29 | 40.91 | 47.68 | 30.69 | [ckpt](gs://tf_model_garden/vision/panoptic/panoptic_fpn/panoptic_fpn_3x)
**Note**: Here the 1x schedule refers to ~12 epochs (22,500 steps at a global batch size of 64 on the ~118k-image COCO train split); the 3x schedule scales this to 67,500 steps.
___
## Citation
```
@misc{kirillov2019panoptic,
title={Panoptic Feature Pyramid Networks},
author={Alexander Kirillov and Ross Girshick and Kaiming He and Piotr Dollár},
year={2019},
eprint={1901.02446},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
runtime:
distribution_strategy: 'tpu'
mixed_precision_dtype: 'bfloat16'
task:
init_checkpoint: 'gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080'
annotation_file: 'coco/instances_val2017.json'
train_data:
global_batch_size: 64
validation_data:
global_batch_size: 8
trainer:
train_steps: 22500
optimizer_config:
learning_rate:
type: 'stepwise'
stepwise:
boundaries: [15000, 20000]
values: [0.12, 0.012, 0.0012]
warmup:
type: 'linear'
linear:
warmup_steps: 500
warmup_learning_rate: 0.0067
runtime:
distribution_strategy: 'tpu'
mixed_precision_dtype: 'bfloat16'
task:
init_checkpoint: 'gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080'
annotation_file: 'coco/instances_val2017.json'
train_data:
global_batch_size: 64
validation_data:
global_batch_size: 8
trainer:
train_steps: 67500
optimizer_config:
learning_rate:
type: 'stepwise'
stepwise:
boundaries: [45000, 60000]
values: [0.12, 0.012, 0.0012]
warmup:
type: 'linear'
linear:
warmup_steps: 500
warmup_learning_rate: 0.0067
......@@ -21,7 +21,6 @@ import tensorflow as tf
from official.vision.beta.modeling import maskrcnn_model
@tf.keras.utils.register_keras_serializable(package='Vision')
class PanopticMaskRCNNModel(maskrcnn_model.MaskRCNNModel):
"""The Panoptic Segmentation model."""
......
......@@ -13,14 +13,13 @@
# limitations under the License.
"""Panoptic MaskRCNN task definition."""
from typing import Any, Dict, List, Mapping, Optional, Tuple
from typing import Any, List, Mapping, Optional, Tuple, Dict
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import task_factory
from official.vision.beta.dataloaders import input_reader_factory
from official.vision.beta.evaluation import coco_evaluator
from official.vision.beta.evaluation import panoptic_quality_evaluator
from official.vision.beta.evaluation import segmentation_metrics
from official.vision.beta.losses import segmentation_losses
......@@ -235,10 +234,7 @@ class PanopticMaskRCNNTask(maskrcnn.MaskRCNNTask):
dtype=tf.float32)
else:
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self.task_config.annotation_file,
include_mask=self.task_config.model.include_mask,
per_category_metrics=self.task_config.per_category_metrics)
self._build_coco_metrics()
rescale_predictions = (not self.task_config.validation_data.parser
.segmentation_resize_eval_groundtruth)
......@@ -379,12 +375,12 @@ class PanopticMaskRCNNTask(maskrcnn.MaskRCNNTask):
'image_info': labels['image_info']
}
logs.update(
{self.coco_metric.name: (labels['groundtruths'], coco_model_outputs)})
if self._process_iou_metric_on_cpu:
logs.update({
self.coco_metric.name: (labels['groundtruths'], coco_model_outputs),
self.segmentation_perclass_iou_metric.name: (
segmentation_labels,
outputs['segmentation_outputs'])
self.segmentation_perclass_iou_metric.name:
(segmentation_labels, outputs['segmentation_outputs'])
})
else:
self.segmentation_perclass_iou_metric.update_state(
......@@ -430,7 +426,7 @@ class PanopticMaskRCNNTask(maskrcnn.MaskRCNNTask):
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
result = {}
result[self.coco_metric.name] = super(
result = super(
PanopticMaskRCNNTask, self).reduce_aggregated_logs(
aggregated_logs=aggregated_logs,
global_step=global_step)
......
......@@ -24,7 +24,6 @@ regularizers = tf.keras.regularizers
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='simclr')
class ProjectionHead(tf.keras.layers.Layer):
"""Projection head."""
......@@ -144,7 +143,6 @@ class ProjectionHead(tf.keras.layers.Layer):
return proj_head_output, proj_finetune_output
@tf.keras.utils.register_keras_serializable(package='simclr')
class ClassificationHead(tf.keras.layers.Layer):
"""Classification Head."""
......
......@@ -57,7 +57,6 @@ def cross_replica_concat(tensor: tf.Tensor, num_replicas: int) -> tf.Tensor:
return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
@tf.keras.utils.register_keras_serializable(package='simclr')
class ContrastiveLoss(object):
"""Contrastive training loss function."""
......
......@@ -22,7 +22,6 @@ from official.modeling import tf_utils
regularizers = tf.keras.regularizers
@tf.keras.utils.register_keras_serializable(package='simclr')
class DenseBN(tf.keras.layers.Layer):
"""Modified Dense layer to help build simclr system.
......
......@@ -27,7 +27,6 @@ PROJECTION_OUTPUT_KEY = 'projection_outputs'
SUPERVISED_OUTPUT_KEY = 'supervised_outputs'
@tf.keras.utils.register_keras_serializable(package='simclr')
class SimCLRModel(tf.keras.Model):
"""A classification model based on SimCLR framework."""
......