# Minimal makefile for Sphinx documentation
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
# Read the docs:
The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/).
The documents in this directory are not meant to be read on GitHub.
# Build the docs:
1. Install detectron2 according to [INSTALL.md](INSTALL.md).
2. Install additional libraries required to build docs:
- docutils==0.16
- Sphinx==3.0.0
- recommonmark==0.6.0
- sphinx_rtd_theme
- mock
3. Run `make html` from this directory.
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# flake8: noqa
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import mock
from sphinx.domains import Domain
from typing import Dict, List, Tuple
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
class GithubURLDomain(Domain):
"""
Resolve certain links in markdown files to github source.
"""
name = "githuburl"
ROOT = "https://github.com/facebookresearch/detectron2/blob/master/"
LINKED_DOC = ["tutorials/install", "tutorials/getting_started"]
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
github_url = None
if not target.endswith("html") and target.startswith("../../"):
url = target.replace("../", "")
github_url = url
if fromdocname in self.LINKED_DOC:
# unresolved links in these docs are all github links
github_url = target
if github_url is not None:
if github_url.endswith("MODEL_ZOO") or github_url.endswith("README"):
# bug of recommonmark.
# https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155
github_url += ".md"
print("Ref {} resolved to github:{}".format(target, github_url))
contnode["refuri"] = self.ROOT + github_url
return [("githuburl:any", contnode)]
else:
return []
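# Example of the resolution above (a sketch with a hypothetical input): a
# markdown link written as "../../MODEL_ZOO.md" in a tutorial arrives here as
# target="../../MODEL_ZOO" (recommonmark strips the ".md"), is rewritten to
# "MODEL_ZOO", gets ".md" appended back, and finally resolves to
# ROOT + "MODEL_ZOO.md".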
# to support markdown
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath("../"))
os.environ["DOC_BUILDING"] = "True"
DEPLOY = os.environ.get("READTHEDOCS") == "True"
# -- Project information -----------------------------------------------------
# fmt: off
try:
import torch # noqa
except ImportError:
for m in [
"torch", "torchvision", "torch.nn", "torch.nn.parallel", "torch.distributed", "torch.multiprocessing", "torch.autograd",
"torch.autograd.function", "torch.nn.modules", "torch.nn.modules.utils", "torch.utils", "torch.utils.data", "torch.onnx",
"torchvision", "torchvision.ops",
]:
sys.modules[m] = mock.Mock(name=m)
sys.modules['torch'].__version__ = "1.5" # fake version
for m in [
"cv2", "scipy", "portalocker", "detectron2._C",
"pycocotools", "pycocotools.mask", "pycocotools.coco", "pycocotools.cocoeval",
"google", "google.protobuf", "google.protobuf.internal", "onnx",
"caffe2", "caffe2.proto", "caffe2.python", "caffe2.python.utils", "caffe2.python.onnx", "caffe2.python.onnx.backend",
]:
sys.modules[m] = mock.Mock(name=m)
# fmt: on
sys.modules["cv2"].__version__ = "3.4"
import detectron2 # isort: skip
project = "detectron2"
copyright = "2019-2020, detectron2 contributors"
author = "detectron2 contributors"
# The short X.Y version
version = detectron2.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "3.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"recommonmark",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
autodoc_inherit_docstrings = False
autodoc_member_order = "bysource"
if DEPLOY:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.1
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"torch": ("https://pytorch.org/docs/master/", None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md", "tutorials/README.md"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "detectron2doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"detectron2",
"detectron2 Documentation",
author,
"detectron2",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
_DEPRECATED_NAMES = set()
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, "__HIDE_SPHINX_DOC__", False):
return True
# Hide some names that are deprecated or not intended to be used
if name in _DEPRECATED_NAMES:
return True
return None
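# Example (a hypothetical helper): any documented object can opt out of the
# generated docs by carrying the flag checked above:
#
#   def _internal_helper():
#       pass
#   _internal_helper.__HIDE_SPHINX_DOC__ = True  # skipped by autodoc_skip_member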
_PAPER_DATA = {
"resnet": ("1512.03385", "Deep Residual Learning for Image Recognition"),
"fpn": ("1612.03144", "Feature Pyramid Networks for Object Detection"),
"mask r-cnn": ("1703.06870", "Mask R-CNN"),
"faster r-cnn": (
"1506.01497",
"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks",
),
"deformconv": ("1703.06211", "Deformable Convolutional Networks"),
"deformconv2": ("1811.11168", "Deformable ConvNets v2: More Deformable, Better Results"),
"panopticfpn": ("1901.02446", "Panoptic Feature Pyramid Networks"),
"retinanet": ("1708.02002", "Focal Loss for Dense Object Detection"),
"cascade r-cnn": ("1712.00726", "Cascade R-CNN: Delving into High Quality Object Detection"),
"lvis": ("1908.03195", "LVIS: A Dataset for Large Vocabulary Instance Segmentation"),
"rrpn": ("1703.01086", "Arbitrary-Oriented Scene Text Detection via Rotation Proposals"),
"in1k1h": ("1706.02677", "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"),
}
def paper_ref_role(
typ: str,
rawtext: str,
text: str,
lineno: int,
inliner,
options: Dict = {},
content: List[str] = [],
):
"""
Parse :paper:`xxx`. Similar to the "extlinks" sphinx extension.
"""
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
text = utils.unescape(text)
has_explicit_title, title, link = split_explicit_title(text)
link = link.lower()
if link not in _PAPER_DATA:
inliner.reporter.warning("Cannot find paper " + link)
paper_url, paper_title = "#", link
else:
paper_url, paper_title = _PAPER_DATA[link]
if "/" not in paper_url:
paper_url = "https://arxiv.org/abs/" + paper_url
if not has_explicit_title:
title = paper_title
pnode = nodes.reference(title, title, internal=False, refuri=paper_url)
return [pnode], []
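# Usage of the role above, in a docstring or .rst file:
#   :paper:`fpn`              -> link titled "Feature Pyramid Networks for Object Detection"
#   :paper:`this work <fpn>`  -> the same link with the explicit title "this work"
# Unknown keys emit a warning and link to "#".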
def setup(app):
from recommonmark.transform import AutoStructify
app.add_domain(GithubURLDomain)
app.connect("autodoc-skip-member", autodoc_skip_member)
app.add_role("paper", paper_ref_role)
app.add_config_value(
"recommonmark_config",
{"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True},
True,
)
app.add_transform(AutoStructify)
.. detectron2 documentation master file, created by
sphinx-quickstart on Sat Sep 21 13:46:45 2019.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to detectron2's documentation!
======================================
.. toctree::
:maxdepth: 2
tutorials/index
notes/index
modules/index
detectron2.checkpoint package
=============================
.. automodule:: detectron2.checkpoint
:members:
:undoc-members:
:show-inheritance:
detectron2.config package
=========================
.. automodule:: detectron2.config
:members:
:undoc-members:
:show-inheritance:
:inherited-members:
Config References
-----------------
.. literalinclude:: ../../detectron2/config/defaults.py
:language: python
:linenos:
:lines: 4-
detectron2.data package
=======================
.. automodule:: detectron2.data
:members:
:undoc-members:
:show-inheritance:
detectron2.data.detection\_utils module
---------------------------------------
.. automodule:: detectron2.data.detection_utils
:members:
:undoc-members:
:show-inheritance:
detectron2.data.datasets module
---------------------------------------
.. automodule:: detectron2.data.datasets
:members:
:undoc-members:
:show-inheritance:
detectron2.data.samplers module
---------------------------------------
.. automodule:: detectron2.data.samplers
:members:
:undoc-members:
:show-inheritance:
detectron2.data.transforms module
---------------------------------------
.. automodule:: detectron2.data.transforms
:members:
:undoc-members:
:show-inheritance:
detectron2.engine package
=========================
.. automodule:: detectron2.engine
:members:
:undoc-members:
:show-inheritance:
detectron2.engine.defaults module
---------------------------------
.. automodule:: detectron2.engine.defaults
:members:
:undoc-members:
:show-inheritance:
detectron2.engine.hooks module
---------------------------------
.. automodule:: detectron2.engine.hooks
:members:
:undoc-members:
:show-inheritance:
detectron2.evaluation package
=============================
.. automodule:: detectron2.evaluation
:members:
:undoc-members:
:show-inheritance:
detectron2.export package
=========================
.. automodule:: detectron2.export
:members:
:undoc-members:
:show-inheritance:
API Documentation
==================
.. toctree::
checkpoint
config
data
engine
evaluation
layers
model_zoo
modeling
solver
structures
utils
export
detectron2.layers package
=========================
.. automodule:: detectron2.layers
:members:
:undoc-members:
:show-inheritance:
detectron2.model_zoo package
============================
.. automodule:: detectron2.model_zoo
:members:
:undoc-members:
:show-inheritance:
detectron2.modeling package
===========================
.. automodule:: detectron2.modeling
:members:
:undoc-members:
:show-inheritance:
detectron2.modeling.poolers module
---------------------------------------
.. automodule:: detectron2.modeling.poolers
:members:
:undoc-members:
:show-inheritance:
detectron2.modeling.sampling module
------------------------------------
.. automodule:: detectron2.modeling.sampling
:members:
:undoc-members:
:show-inheritance:
detectron2.modeling.box_regression module
------------------------------------------
.. automodule:: detectron2.modeling.box_regression
:members:
:undoc-members:
:show-inheritance:
Model Registries
-----------------
These are the different registries provided in modeling.
Each registry provides the ability to replace a component with your own customized one,
without having to modify detectron2's code.
Note that users cannot customize arbitrary lines of code directly:
even to add a single line somewhere,
you will likely need to find the smallest registry that contains that line,
and register your component with that registry, as in the sketch below.
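For example, a custom meta-architecture can be registered and then selected
through the config (a minimal sketch; ``MyMetaArch`` is a hypothetical name):

.. code-block:: python

    from torch import nn
    from detectron2.modeling import META_ARCH_REGISTRY

    @META_ARCH_REGISTRY.register()
    class MyMetaArch(nn.Module):
        def __init__(self, cfg):
            super().__init__()
            # build the model from cfg here

Setting ``MODEL.META_ARCHITECTURE = "MyMetaArch"`` in a config then makes
detectron2 build this class instead of a builtin one.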
.. autodata:: detectron2.modeling.META_ARCH_REGISTRY
.. autodata:: detectron2.modeling.BACKBONE_REGISTRY
.. autodata:: detectron2.modeling.PROPOSAL_GENERATOR_REGISTRY
.. autodata:: detectron2.modeling.RPN_HEAD_REGISTRY
.. autodata:: detectron2.modeling.ANCHOR_GENERATOR_REGISTRY
.. autodata:: detectron2.modeling.ROI_HEADS_REGISTRY
.. autodata:: detectron2.modeling.ROI_BOX_HEAD_REGISTRY
.. autodata:: detectron2.modeling.ROI_MASK_HEAD_REGISTRY
.. autodata:: detectron2.modeling.ROI_KEYPOINT_HEAD_REGISTRY
detectron2.solver package
=========================
.. automodule:: detectron2.solver
:members:
:undoc-members:
:show-inheritance:
detectron2.structures package
=============================
.. automodule:: detectron2.structures
:members:
:undoc-members:
:show-inheritance:
detectron2.utils package
========================
detectron2.utils.colormap module
--------------------------------
.. automodule:: detectron2.utils.colormap
:members:
:undoc-members:
:show-inheritance:
detectron2.utils.comm module
----------------------------
.. automodule:: detectron2.utils.comm
:members:
:undoc-members:
:show-inheritance:
detectron2.utils.events module
------------------------------
.. automodule:: detectron2.utils.events
:members:
:undoc-members:
:show-inheritance:
detectron2.utils.logger module
------------------------------
.. automodule:: detectron2.utils.logger
:members:
:undoc-members:
:show-inheritance:
detectron2.utils.registry module
--------------------------------
.. automodule:: detectron2.utils.registry
:members:
:undoc-members:
:show-inheritance:
detectron2.utils.memory module
----------------------------------
.. automodule:: detectron2.utils.memory
:members:
:undoc-members:
:show-inheritance:
detectron2.utils.analysis module
----------------------------------
.. automodule:: detectron2.utils.analysis
:members:
:undoc-members:
:show-inheritance:
detectron2.utils.visualizer module
----------------------------------
.. automodule:: detectron2.utils.visualizer
:members:
:undoc-members:
:show-inheritance:
detectron2.utils.video\_visualizer module
-----------------------------------------
.. automodule:: detectron2.utils.video_visualizer
:members:
:undoc-members:
:show-inheritance:
# Benchmarks
Here we benchmark the training speed of a Mask R-CNN in detectron2
against some other popular open source Mask R-CNN implementations.
### Settings
* Hardware: 8 NVIDIA V100s with NVLink.
* Software: Python 3.7, CUDA 10.1, cuDNN 7.6.5, PyTorch 1.5,
TensorFlow 1.15.0rc2, Keras 2.2.5, MXNet 1.6.0b20190820.
* Model: an end-to-end R-50-FPN Mask R-CNN model, using the same hyperparameters as the
[Detectron baseline config](https://github.com/facebookresearch/Detectron/blob/master/configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml)
(it does not have scale augmentation).
* Metrics: We use the average throughput in iterations 100-500 to skip GPU warmup time.
Note that for R-CNN-style models, the throughput of a model typically changes during training, because
it depends on the predictions of the model. Therefore this metric is not directly comparable with
the "train speed" in the model zoo, which is the average speed over the entire training run.
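A minimal sketch of how such a throughput number can be computed (the batch size
of 16 = 2 images/GPU × 8 GPUs is an assumption; the timings are hypothetical):

```python
# Average throughput between iterations 100 and 500 (hypothetical timings).
batch_size = 16                  # assumed: 2 images per GPU x 8 GPUs
t_100, t_500 = 112.5, 215.7      # wall-clock seconds at iterations 100 and 500
throughput = batch_size * (500 - 100) / (t_500 - t_100)
print(round(throughput, 1))      # -> 62.0 img/s
```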
### Main Results
```eval_rst
+-------------------------------+--------------------+
| Implementation | Throughput (img/s) |
+===============================+====================+
| |D2| |PT| | 62 |
+-------------------------------+--------------------+
| mmdetection_ |PT| | 53 |
+-------------------------------+--------------------+
| maskrcnn-benchmark_ |PT| | 53 |
+-------------------------------+--------------------+
| tensorpack_ |TF| | 50 |
+-------------------------------+--------------------+
| simpledet_ |mxnet| | 39 |
+-------------------------------+--------------------+
| Detectron_ |C2| | 19 |
+-------------------------------+--------------------+
| `matterport/Mask_RCNN`__ |TF| | 14 |
+-------------------------------+--------------------+
.. _maskrcnn-benchmark: https://github.com/facebookresearch/maskrcnn-benchmark/
.. _tensorpack: https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN
.. _mmdetection: https://github.com/open-mmlab/mmdetection/
.. _simpledet: https://github.com/TuSimple/simpledet/
.. _Detectron: https://github.com/facebookresearch/Detectron
__ https://github.com/matterport/Mask_RCNN/
.. |D2| image:: https://github.com/facebookresearch/detectron2/raw/master/.github/Detectron2-Logo-Horz.svg?sanitize=true
:height: 15pt
:target: https://github.com/facebookresearch/detectron2/
.. |PT| image:: https://pytorch.org/assets/images/logo-icon.svg
:width: 15pt
:height: 15pt
:target: https://pytorch.org
.. |TF| image:: https://static.nvidiagrid.net/ngc/containers/tensorflow.png
:width: 15pt
:height: 15pt
:target: https://tensorflow.org
.. |mxnet| image:: https://github.com/dmlc/web-data/raw/master/mxnet/image/mxnet_favicon.png
:width: 15pt
:height: 15pt
:target: https://mxnet.apache.org/
.. |C2| image:: https://caffe2.ai/static/logo.svg
:width: 15pt
:height: 15pt
:target: https://caffe2.ai
```
Details for each implementation:
* __Detectron2__: with release v0.1.2, run:
```
python tools/train_net.py --config-file configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml --num-gpus 8
```
* __mmdetection__: at commit `b0d845f`, run
```
./tools/dist_train.sh configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py 8
```
* __maskrcnn-benchmark__: use commit `0ce8f6f` with `sed -i 's/torch.uint8/torch.bool/g' **/*.py; sed -i 's/AT_CHECK/TORCH_CHECK/g' **/*.cu`
to make it compatible with PyTorch 1.5. Then, run training with
```
python -m torch.distributed.launch --nproc_per_node=8 tools/train_net.py --config-file configs/e2e_mask_rcnn_R_50_FPN_1x.yaml
```
The speed we observed is faster than reported in its model zoo, likely due to different software versions.
* __tensorpack__: at commit `caafda`, `export TF_CUDNN_USE_AUTOTUNE=0`, then run
```
mpirun -np 8 ./train.py --config DATA.BASEDIR=/data/coco TRAINER=horovod BACKBONE.STRIDE_1X1=True TRAIN.STEPS_PER_EPOCH=50 --load ImageNet-R50-AlignPadding.npz
```
* __SimpleDet__: at commit `9187a1`, run
```
python detection_train.py --config config/mask_r50v1_fpn_1x.py
```
* __Detectron__: run
```
python tools/train_net.py --cfg configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml
```
Note that many of its ops run on CPUs, so its performance is limited.
* __matterport/Mask_RCNN__: at commit `3deaec`, apply the following diff, `export TF_CUDNN_USE_AUTOTUNE=0`, then run
```
python coco.py train --dataset=/data/coco/ --model=imagenet
```
Note that many small details in this implementation might be different
from Detectron's standards.
<details>
<summary>
(diff to make it use the same hyperparameters - click to expand)
</summary>
```diff
diff --git i/mrcnn/model.py w/mrcnn/model.py
index 62cb2b0..61d7779 100644
--- i/mrcnn/model.py
+++ w/mrcnn/model.py
@@ -2367,8 +2367,8 @@ class MaskRCNN():
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
- validation_data=val_generator,
- validation_steps=self.config.VALIDATION_STEPS,
+ #validation_data=val_generator,
+ #validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=True,
diff --git i/mrcnn/parallel_model.py w/mrcnn/parallel_model.py
index d2bf53b..060172a 100644
--- i/mrcnn/parallel_model.py
+++ w/mrcnn/parallel_model.py
@@ -32,6 +32,7 @@ class ParallelModel(KM.Model):
keras_model: The Keras model to parallelize
gpu_count: Number of GPUs. Must be > 1
"""
+ super().__init__()
self.inner_model = keras_model
self.gpu_count = gpu_count
merged_outputs = self.make_parallel()
diff --git i/samples/coco/coco.py w/samples/coco/coco.py
index 5d172b5..239ed75 100644
--- i/samples/coco/coco.py
+++ w/samples/coco/coco.py
@@ -81,7 +81,10 @@ class CocoConfig(Config):
IMAGES_PER_GPU = 2
# Uncomment to train on 8 GPUs (default is 1)
- # GPU_COUNT = 8
+ GPU_COUNT = 8
+ BACKBONE = "resnet50"
+ STEPS_PER_EPOCH = 50
+ TRAIN_ROIS_PER_IMAGE = 512
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
@@ -496,29 +499,10 @@ if __name__ == '__main__':
# *** This training schedule is an example. Update to your needs ***
# Training - Stage 1
- print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=40,
- layers='heads',
- augmentation=augmentation)
-
- # Training - Stage 2
- # Finetune layers from ResNet stage 4 and up
- print("Fine tune Resnet stage 4 and up")
- model.train(dataset_train, dataset_val,
- learning_rate=config.LEARNING_RATE,
- epochs=120,
- layers='4+',
- augmentation=augmentation)
-
- # Training - Stage 3
- # Fine tune all layers
- print("Fine tune all layers")
- model.train(dataset_train, dataset_val,
- learning_rate=config.LEARNING_RATE / 10,
- epochs=160,
- layers='all',
+ layers='3+',
augmentation=augmentation)
elif args.command == "evaluate":
```
</details>
# Change Log
### Releases
See release log at
[https://github.com/facebookresearch/detectron2/releases](https://github.com/facebookresearch/detectron2/releases).
### Notable Backward-Incompatible Changes:
* 03/30/2020: Custom box head's `output_size` changed to `output_shape`.
* 02/14/2020, 02/18/2020: Mask head and keypoint head now include logic for losses & inference. Custom heads
should override the feature computation with a `layers()` method.
* 11/11/2019: `detectron2.data.detection_utils.read_image` transposes images with EXIF information.
### Config Version Change Log
* v1: Rename `RPN_HEAD.NAME` to `RPN.HEAD_NAME`.
* v2: A batch of renames of many configurations before release.
### Silent Regressions in Historical Versions:
We list a few silent regressions below, since they produce incorrect results silently and are hard to debug.
* 04/01/2020 - 05/11/2020: Bad accuracy if `TRAIN_ON_PRED_BOXES` is set to True.
* 03/30/2020 - 04/01/2020: ResNets are not correctly built.
* 12/19/2019 - 12/26/2019: Using aspect ratio grouping causes a drop in accuracy.
* release - 11/9/2019: Test time augmentation does not predict the last category.
# Compatibility with Other Libraries
## Compatibility with Detectron (and maskrcnn-benchmark)
Detectron2 addresses some legacy issues left in Detectron. As a result, their models
are not compatible:
running inference with the same model weights will produce different results in the two code bases.
The major differences regarding inference are:
- The height and width of a box with corners (x1, y1) and (x2, y2) is now computed more naturally as
  width = x2 - x1 and height = y2 - y1;
  in Detectron, a "+ 1" was added to both height and width.
  Note that the relevant ops in Caffe2 have [adopted this change of convention](https://github.com/pytorch/pytorch/pull/20550)
  with an extra option,
  so it is still possible to run inference with a Detectron2-trained model in Caffe2.
  The change in height/width calculations most notably affects:
  - encoding/decoding in bounding box regression.
  - non-maximum suppression (though the effect here is negligible).
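  A minimal numeric sketch of the two conventions (the coordinates are hypothetical):

  ```python
  x1, y1, x2, y2 = 10.0, 20.0, 50.0, 60.0
  w, h = x2 - x1, y2 - y1            # Detectron2: 40.0, 40.0
  w, h = x2 - x1 + 1, y2 - y1 + 1    # legacy Detectron: 41.0, 41.0
  ```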
- RPN now uses simpler anchors with fewer quantization artifacts.
In Detectron, the anchors were quantized and
[do not have accurate areas](https://github.com/facebookresearch/Detectron/issues/227).
In Detectron2, the anchors are center-aligned to feature grid points and not quantized.
- Classification layers have a different ordering of class labels.
This involves any trainable parameter with shape (..., num_categories + 1, ...).
In Detectron2, integer labels [0, K-1] correspond to the K = num_categories object categories
and the label "K" corresponds to the special "background" category.
In Detectron, label "0" means background, and labels [1, K] correspond to the K categories.
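  As an illustration, converting a (hypothetical) Detectron classification weight
  to the Detectron2 ordering amounts to moving the background row to the end:

  ```python
  import torch

  K = 80                                   # number of object categories (e.g. COCO)
  w_detectron = torch.randn(K + 1, 1024)   # row 0 = background, rows 1..K = objects
  # Detectron2 convention: rows 0..K-1 = objects, row K = background
  w_detectron2 = torch.cat([w_detectron[1:], w_detectron[:1]], dim=0)
  ```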
- ROIAlign is implemented differently. The new implementation is [available in Caffe2](https://github.com/pytorch/pytorch/pull/23706).
1. All the ROIs are shifted by half a pixel compared to Detectron in order to create better image-feature-map alignment.
See `layers/roi_align.py` for details.
To enable the old behavior, use `ROIAlign(aligned=False)`, or `POOLER_TYPE=ROIAlign` instead of
`ROIAlignV2` (the default).
1. The ROIs are not required to have a minimum size of 1.
This will lead to tiny differences in the output, but should be negligible.
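  A sketch of opting into the old behavior in code (`ROIAlign` is the class in
  `layers/roi_align.py`; the other constructor arguments are illustrative):

  ```python
  from detectron2.layers import ROIAlign

  # aligned=True (the "ROIAlignV2" behavior) is the default;
  # aligned=False reproduces the original Detectron implementation.
  pooler = ROIAlign(output_size=(7, 7), spatial_scale=1 / 16,
                    sampling_ratio=0, aligned=False)
  ```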
- The mask inference function is different.
In Detectron2, the "paste_mask" function is different and should be more accurate than in Detectron. This change
can improve mask AP on COCO by ~0.5% absolute.
There are some other differences in training as well, but they won't affect
model-level compatibility. The major ones are:
- We fixed a [bug](https://github.com/facebookresearch/Detectron/issues/459) in
Detectron, by making `RPN.POST_NMS_TOPK_TRAIN` per-image, rather than per-batch.
The fix may lead to a small accuracy drop for a few models (e.g. keypoint
detection) and will require some parameter tuning to match the Detectron results.
- For simplicity, we change the default loss in bounding box regression to L1 loss, instead of smooth L1 loss.
We have observed that this tends to slightly decrease box AP50 while improving box AP for higher
overlap thresholds (and leading to a slight overall improvement in box AP).
- We interpret the coordinates in COCO bounding box and segmentation annotations
as coordinates in range `[0, width]` or `[0, height]`. The coordinates in
COCO keypoint annotations are interpreted as pixel indices in range `[0, width - 1]` or `[0, height - 1]`.
Note that this affects how flip augmentation is implemented.
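  For example, horizontal flipping differs under the two interpretations
  (a sketch; the values are hypothetical):

  ```python
  width = 640.0
  box_x, kpt_x = 100.0, 100.0
  flipped_box_x = width - box_x        # box coordinates live in [0, width]
  flipped_kpt_x = width - 1 - kpt_x    # keypoints are pixel indices in [0, width - 1]
  ```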
We will later share more details and the rationale behind the above-mentioned issues
about pixels, coordinates, and "+1"s.
## Compatibility with Caffe2
As mentioned above, despite the incompatibilities with Detectron, the relevant
ops have been implemented in Caffe2.
Therefore, models trained with detectron2 can be converted to Caffe2.
See [Deployment](../tutorials/deployment.md) for the tutorial.
## Compatibility with TensorFlow
Most ops are available in TensorFlow, although some tiny differences in
the implementation of resize / ROIAlign / padding need to be addressed.
A working conversion script is provided by [tensorpack FasterRCNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN/convert_d2)
to run a standard detectron2 model in TensorFlow.