Commit fbc5ba06 authored by Zhichao Lu's avatar Zhichao Lu
Browse files

Resolve comments

parent 3e05f21a
...@@ -33,6 +33,7 @@ https://scholar.googleusercontent.com/scholar.bib?q=info:l291WsrB-hQJ:scholar.go ...@@ -33,6 +33,7 @@ https://scholar.googleusercontent.com/scholar.bib?q=info:l291WsrB-hQJ:scholar.go
* Chen Sun, github: [jesu9](https://github.com/jesu9) * Chen Sun, github: [jesu9](https://github.com/jesu9)
* Menglong Zhu, github: [dreamdragon](https://github.com/dreamdragon) * Menglong Zhu, github: [dreamdragon](https://github.com/dreamdragon)
* Alireza Fathi, github: [afathi3](https://github.com/afathi3) * Alireza Fathi, github: [afathi3](https://github.com/afathi3)
* Zhichao Lu, github: [pkulzc](https://github.com/pkulzc)
## Table of contents ## Table of contents
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
Generates grid anchors on the fly corresponding to multiple CNN layers as Generates grid anchors on the fly corresponding to multiple CNN layers as
described in: described in:
"Focal Loss for Dense Object Detection" "Focal Loss for Dense Object Detection"
T.-Y. Lin, P. Goyal, R. Girshick, K. He, P. Dollar T.-Y. Lin, P. Goyal, R. Girshick, K. He, P. Dollar (https://arxiv.org/abs/1708.02002)
""" """
from object_detection.anchor_generators import grid_anchor_generator from object_detection.anchor_generators import grid_anchor_generator
...@@ -25,7 +25,7 @@ from object_detection.core import box_list_ops ...@@ -25,7 +25,7 @@ from object_detection.core import box_list_ops
class MultiscaleGridAnchorGenerator(object): class MultiscaleGridAnchorGenerator(object):
"""Generate a grid of anchors for multiple CNN layers.""" """Generate a grid of anchors for multiple CNN layers of different scale."""
def __init__(self, min_level, max_level, anchor_scale, aspect_ratios, def __init__(self, min_level, max_level, anchor_scale, aspect_ratios,
scales_per_octave): scales_per_octave):
......
...@@ -657,7 +657,7 @@ def filter_greater_than(boxlist, thresh, scope=None): ...@@ -657,7 +657,7 @@ def filter_greater_than(boxlist, thresh, scope=None):
This op keeps the collection of boxes whose corresponding scores are This op keeps the collection of boxes whose corresponding scores are
greater than the input threshold. greater than the input threshold.
TODO: Change function name to FilterScoresGreaterThan TODO: Change function name to filter_scores_greater_than
Args: Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field boxlist: BoxList holding N boxes. Must contain a 'scores' field
......
...@@ -101,8 +101,7 @@ class BoxPredictor(object): ...@@ -101,8 +101,7 @@ class BoxPredictor(object):
with tf.variable_scope(scope): with tf.variable_scope(scope):
return self._predict(image_features, num_predictions_per_location, return self._predict(image_features, num_predictions_per_location,
**params) **params)
else: return self._predict(image_features, num_predictions_per_location,
return self._predict(image_features, num_predictions_per_location,
**params) **params)
# TODO: num_predictions_per_location could be moved to constructor. # TODO: num_predictions_per_location could be moved to constructor.
......
...@@ -40,7 +40,7 @@ Output classes are always integers in the range [0, num_classes). Any mapping ...@@ -40,7 +40,7 @@ Output classes are always integers in the range [0, num_classes). Any mapping
of these integers to semantic labels is to be handled outside of this class. of these integers to semantic labels is to be handled outside of this class.
Images are resized in the `preprocess` method. All of `preprocess`, `predict`, Images are resized in the `preprocess` method. All of `preprocess`, `predict`,
and `postprocess` should be stateless. and `postprocess` should be reentrant.
The `preprocess` method runs `image_resizer_fn` that returns resized_images and The `preprocess` method runs `image_resizer_fn` that returns resized_images and
`true_image_shapes`. Since `image_resizer_fn` can pad the images with zeros, `true_image_shapes`. Since `image_resizer_fn` can pad the images with zeros,
......
...@@ -102,7 +102,7 @@ def open_sharded_output_tfrecords(exit_stack, base_path, num_shards): ...@@ -102,7 +102,7 @@ def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
""" """
tf_record_output_filenames = [ tf_record_output_filenames = [
'{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards) '{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
for idx in xrange(num_shards) for idx in range(num_shards)
] ]
tfrecords = [ tfrecords = [
......
...@@ -117,9 +117,8 @@ def get_evaluators(eval_config, categories): ...@@ -117,9 +117,8 @@ def get_evaluators(eval_config, categories):
for eval_metric_fn_key in eval_metric_fn_keys: for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT: if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
raise ValueError('Metric not found: {}'.format(eval_metric_fn_key)) raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
else: evaluators_list.append(
evaluators_list.append( EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories))
EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories))
return evaluators_list return evaluators_list
......
...@@ -103,7 +103,7 @@ FLAGS = flags.FLAGS ...@@ -103,7 +103,7 @@ FLAGS = flags.FLAGS
def create_tf_example(example): def create_tf_example(example):
# TODO: Populate the following variables from your example. # TODO(user): Populate the following variables from your example.
height = None # Image height height = None # Image height
width = None # Image width width = None # Image width
filename = None # Filename of the image. Empty if image is not from file filename = None # Filename of the image. Empty if image is not from file
...@@ -139,7 +139,7 @@ def create_tf_example(example): ...@@ -139,7 +139,7 @@ def create_tf_example(example):
def main(_): def main(_):
writer = tf.python_io.TFRecordWriter(FLAGS.output_path) writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
# TODO: Write code to read in your dataset to examples variable # TODO(user): Write code to read in your dataset to examples variable
for example in examples: for example in examples:
tf_example = create_tf_example(example) tf_example = create_tf_example(example)
......
...@@ -130,7 +130,7 @@ class SSDMetaArch(model.DetectionModel): ...@@ -130,7 +130,7 @@ class SSDMetaArch(model.DetectionModel):
add_summaries=True): add_summaries=True):
"""SSDMetaArch Constructor. """SSDMetaArch Constructor.
TODO(rathodv,jonathanhuang): group NMS parameters + score converter into TODO: group NMS parameters + score converter into
a class and loss parameters into a class and write config protos for a class and loss parameters into a class and write config protos for
postprocessing and losses. postprocessing and losses.
......
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for evaluating object detections with COCO metrics.""" """Class for evaluating object detections with COCO metrics."""
import numpy as np import numpy as np
import tensorflow as tf import tensorflow as tf
......
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image.understanding.object_detection.metrics.coco_evaluation.""" """Tests for image.understanding.object_detection.metrics.coco_evaluation."""
from __future__ import absolute_import from __future__ import absolute_import
......
"""Wrappers for third party pycocotools to be used within i/u/object_detection. # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for third party pycocotools to be used within object_detection.
Note that nothing in this file is tensorflow related and thus cannot Note that nothing in this file is tensorflow related and thus cannot
be called directly as a slim metric, for example. be called directly as a slim metric, for example.
......
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.image.understanding.object_detection.metrics.coco_tools.""" """Tests for google3.image.understanding.object_detection.metrics.coco_tools."""
import json import json
import os import os
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment