Unverified commit ca552843 authored by Srihari Humbarwadi, committed by GitHub

Merge branch 'panoptic-segmentation' into panoptic-segmentation

parents 7e2f7a35 6b90e134
@@ -29,13 +29,49 @@ def _dump_graph_in_text_format(filename, graph_def):
class InterpreterWithCustomOps(tf.lite.Interpreter):
"""Extended tf.lite.Interpreter."""
def __init__(self, model_content, custom_op_registerers):
self._custom_op_registerers = custom_op_registerers
def __init__(self, model_content, custom_op_registerers=None):
self._custom_op_registerers = custom_op_registerers or []
super(InterpreterWithCustomOps, self).__init__(model_content=model_content)
def op_details(self):
op_details = {}
try:
op_details = self._get_ops_details() # Accessing experimental method.
except AttributeError:
print('Unable to access op details')
return op_details
def set_output_quantized_for_custom_ops(graph_def):
def op_histogram(self):
op_hist = {}
op_list = self.op_details()
for op in op_list:
if op['op_name'] in op_hist:
op_hist[op['op_name']] += 1
else:
op_hist[op['op_name']] = 1
return op_hist
def check_op_histogram(self, expected):
passed = True
for k, v in self.op_histogram().items():
if k not in expected:
print('Unexpected key {} found {} times.'.format(k, v))
passed = False
continue
elif expected[k] != v:
print('Expected {} counts of key {} found {}.'.format(
expected[k], k, v))
passed = False
del expected[k]
for k, v in expected.items():
print('Missing expected key {} value {}.'.format(k, v))
passed = False
return passed
def set_output_quantized_for_custom_ops(graph_def, use_mlir=True):
"""Set output types/quantized flag for custom/unsupported ops."""
quantized_custom_ops = {
'SequenceStringProjection': [tf.float32.as_datatype_enum],
@@ -44,6 +80,8 @@ def set_output_quantized_for_custom_ops(graph_def):
'ExpectedValueOp': [tf.float32.as_datatype_enum],
'LayerNorm': [tf.float32.as_datatype_enum],
'UniformCausalAttn': [tf.float32.as_datatype_enum],
'RnnDecoderReadState': [tf.float32.as_datatype_enum],
'RnnDecoderWriteState': [tf.float32.as_datatype_enum],
}
custom_op_renames = {
'SequenceStringProjection': 'SEQUENCE_STRING_PROJECTION',
@@ -52,30 +90,27 @@ def set_output_quantized_for_custom_ops(graph_def):
for node in graph_def.node:
if node.op in quantized_custom_ops:
node.attr['_output_quantized'].b = True
node.attr['_output_types'].list.type[:] = quantized_custom_ops[node.op]
if node.op in custom_op_renames:
if use_mlir:
node.attr['_tfl_quant_trait'].s = str.encode('fully_quantizable')
else:
node.attr['_output_quantized'].b = True
node.attr['_output_types'].list.type[:] = quantized_custom_ops[node.op]
if not use_mlir and node.op in custom_op_renames:
node.op = custom_op_renames[node.op]
def generate_tflite(session, graph, input_tensors, output_tensors):
def generate_tflite(session,
graph,
input_tensors,
output_tensors,
use_mlir=True):
"""Generate TFLite model from a session, graph and input/output tensors."""
output_nodes = [tensor.name.split(':')[0] for tensor in output_tensors]
graph_def = tf.graph_util.convert_variables_to_constants(
session, graph.as_graph_def(), output_nodes)
set_output_quantized_for_custom_ops(graph_def)
# TODO(b/171063452): Bug needs to be fixed to handle this correctly.
# def _node_name(tensor):
# return tensor.name.split(':')[0]
set_output_quantized_for_custom_ops(graph_def, use_mlir)
# input_arrays_with_shape = [
# (_node_name(tensor), None) for tensor in input_tensors
# ]
# output_arrays = [_node_name(tensor) for tensor in output_tensors]
# converter = tf.lite.TFLiteConverter(graph_def, None, None,
# input_arrays_with_shape, output_arrays)
converter = tf.lite.TFLiteConverter(graph_def, input_tensors, output_tensors)
converter.inference_type = tf.uint8
converter.default_ranges_stats = (127.5, 127.5)
@@ -83,5 +118,5 @@ def generate_tflite(session, graph, input_tensors, output_tensors):
tensor.op.name: (127.5, 127.5) for tensor in input_tensors
}
converter.allow_custom_ops = True
converter.experimental_new_converter = False
converter.experimental_new_converter = use_mlir
return converter.convert()
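For orientation, here is a minimal usage sketch of the updated helpers, assuming a TF1 session `sess`, its `graph`, and lists of `input_tensors`/`output_tensors` already built elsewhere; the op names and counts passed to `check_op_histogram` are purely illustrative, not taken from a real model:

```python
# Minimal sketch; `sess`, `graph`, `input_tensors`, and `output_tensors`
# are assumed to come from an existing TF1 inference graph.
tflite_model = generate_tflite(
    sess, graph, input_tensors, output_tensors, use_mlir=True)

# custom_op_registerers may be omitted now that it defaults to an empty list.
interpreter = InterpreterWithCustomOps(model_content=tflite_model)
interpreter.allocate_tensors()

# Inspect which ops ended up in the converted model.
print(interpreter.op_histogram())

# The expected counts below are illustrative placeholders.
assert interpreter.check_op_histogram({'FULLY_CONNECTED': 1, 'SOFTMAX': 1})
```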
@@ -197,9 +197,10 @@
},
"outputs": [],
"source": [
"# setup path\n",
"# setup path and install tf-slim\n",
"import sys\n",
"sys.path.append('/content/models/research/slim')"
"sys.path.append('/content/models/research/slim')",
"!pip install tf_slim"
]
},
{
@@ -228,8 +229,10 @@
"outputs": [],
"source": [
"import tensorflow.compat.v1 as tf\n",
"import tf_slim as slim\n",
"from nets.mobilenet import mobilenet_v2\n",
"\n",
"tf.compat.v1.disable_eager_execution()\n",
"tf.reset_default_graph()\n",
"\n",
"# For simplicity we just decode jpeg inside tensorflow.\n",
@@ -244,7 +247,7 @@
"images = tf.image.resize_images(images, (224, 224))\n",
"\n",
"# Note: arg_scope is optional for inference.\n",
"with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope(is_training=False)):\n",
"with slim.arg_scope(mobilenet_v2.training_scope(is_training=False)):\n",
" logits, endpoints = mobilenet_v2.mobilenet(images)\n",
" \n",
"# Restore using exponential moving average since it produces (1.5-2%) higher \n",
@@ -21,6 +21,11 @@ ArXiv: [https://arxiv.org/pdf/1802.05522.pdf](https://arxiv.org/pdf/1802.05522.pdf)
<a href="https://sites.google.com/view/vid2depth"><img src='https://storage.googleapis.com/vid2depth/media/approach.png' width=400></a>
</p>
## Update: TF2 version.
Please see [https://github.com/IAMAl/vid2depth_tf2](https://github.com/IAMAl/vid2depth_tf2)
for a TF2 implementation of vid2depth.
## 1. Installation
### Requirements
@@ -36,10 +41,6 @@ pip install scipy
pip install tensorflow
```
#### For building the ICP op (work in progress)
* Bazel: https://bazel.build/
### Download vid2depth
```shell
@@ -60,11 +61,27 @@ unzip "*.zip"
### Download Cityscapes dataset (110GB) (optional)
You will need to register in order to download the data. Download the following files:
You will need to register in order to download the data. Download the following
files:
* leftImg8bit_sequence_trainvaltest.zip
* camera_trainvaltest.zip
### Download Bike dataset (34GB) (optional)
Please see [https://research.google/tools/datasets/bike-video/](https://research.google/tools/datasets/bike-video/)
for info on the bike video dataset.
Special thanks to [Guangming Wang](https://guangmingw.github.io/) for helping us
restore this dataset after it was accidentally deleted.
```shell
mkdir -p ~/vid2depth/bike-uncompressed
cd ~/vid2depth/bike-uncompressed
wget https://storage.googleapis.com/vid2depth/dataset/BikeVideoDataset.tar
tar xvf BikeVideoDataset.tar
```
## 3. Inference
### Download trained model
@@ -113,23 +130,28 @@ python dataset/gen_data.py \
--seq_length 3
```
### Compile the ICP op (work in progress)
The ICP op depends on multiple software packages (TensorFlow, Point Cloud
Library, FLANN, Boost, HDF5). The Bazel build system requires individual BUILD
files for each of these packages. We have included a partial implementation of
these BUILD files inside the third_party directory. But they are not ready for
compiling the op. If you manage to build the op, please let us know so we can
include your contribution.
### Prepare Bike training sequences (optional)
```shell
# Prepare training sequences.
cd tensorflow/models/research/vid2depth
bazel build ops:pcl_demo # Build test program using PCL only.
bazel build ops:icp_op.so
python dataset/gen_data.py \
--dataset_name bike \
--dataset_dir ~/vid2depth/bike-uncompressed \
--data_dir ~/vid2depth/data/bike \
--seq_length 3
```
For the time being, it is possible to run inference on the pre-trained model and
run training without the icp loss.
### Compile the ICP op
The pre-trained model is trained using the ICP loss. It is possible to run
inference on this pre-trained model without compiling the ICP op. It is also
possible to train a new model from scratch without compiling the ICP op by
setting the weight of the ICP loss to zero.
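As a rough sketch of that option, one could zero out the ICP term when launching the training script; the flag names below (`--icp_weight`, `--data_dir`, `--checkpoint_dir`) and the paths are assumptions, so check the flags actually defined in train.py before running:

```shell
# Hypothetical invocation; flag names and paths are assumptions,
# verify against the flags declared in train.py.
cd tensorflow/models/research/vid2depth
python train.py \
  --data_dir ~/vid2depth/data/bike \
  --checkpoint_dir ~/vid2depth/checkpoints \
  --icp_weight 0
```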
If you would like to compile the op and run a new training job using it, please
use the CMakeLists.txt file at
[https://github.com/IAMAl/vid2depth_tf2/tree/master/ops](https://github.com/IAMAl/vid2depth_tf2/tree/master/ops).
### Run training