OpenDAS / detectron2 · Commits

Commit b634945d, authored Apr 09, 2025 by limm
support v0.6
Parent: 5b3792fc

Showing 20 changed files (of 409 changes) with 4853 additions and 0 deletions (+4853, -0)
detectron2/modeling/meta_arch/semantic_seg.py               +260  -0
detectron2/modeling/mmdet_wrapper.py                        +274  -0
detectron2/modeling/poolers.py                              +245  -0
detectron2/modeling/postprocessing.py                       +101  -0
detectron2/modeling/proposal_generator/__init__.py          +5    -0
detectron2/modeling/proposal_generator/build.py             +24   -0
detectron2/modeling/proposal_generator/proposal_utils.py    +200  -0
detectron2/modeling/proposal_generator/rpn.py               +533  -0
detectron2/modeling/proposal_generator/rrpn.py              +207  -0
detectron2/modeling/roi_heads/__init__.py                   +29   -0
detectron2/modeling/roi_heads/box_head.py                   +118  -0
detectron2/modeling/roi_heads/cascade_rcnn.py               +298  -0
detectron2/modeling/roi_heads/fast_rcnn.py                  +485  -0
detectron2/modeling/roi_heads/keypoint_head.py              +272  -0
detectron2/modeling/roi_heads/mask_head.py                  +292  -0
detectron2/modeling/roi_heads/roi_heads.py                  +877  -0
detectron2/modeling/roi_heads/rotated_fast_rcnn.py          +270  -0
detectron2/modeling/sampling.py                             +54   -0
detectron2/modeling/test_time_augmentation.py               +307  -0
detectron2/projects/README.md                               +2    -0
Too many changes to show: to preserve performance, only 409 of 409+ files are displayed.
detectron2/modeling/meta_arch/semantic_seg.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Callable, Dict, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.structures import ImageList
from detectron2.utils.registry import Registry

from ..backbone import Backbone, build_backbone
from ..postprocessing import sem_seg_postprocess
from .build import META_ARCH_REGISTRY

__all__ = [
    "SemanticSegmentor",
    "SEM_SEG_HEADS_REGISTRY",
    "SemSegFPNHead",
    "build_sem_seg_head",
]


SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS")
SEM_SEG_HEADS_REGISTRY.__doc__ = """
Registry for semantic segmentation heads, which make semantic segmentation predictions
from feature maps.
"""


@META_ARCH_REGISTRY.register()
class SemanticSegmentor(nn.Module):
    """
    Main class for semantic segmentation architectures.
    """

    @configurable
    def __init__(
        self,
        *,
        backbone: Backbone,
        sem_seg_head: nn.Module,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
    ):
        """
        Args:
            backbone: a backbone module, must follow detectron2's backbone interface
            sem_seg_head: a module that predicts semantic segmentation from backbone features
            pixel_mean, pixel_std: list or tuple with #channels element, representing
                the per-channel mean and std to be used to normalize the input image
        """
        super().__init__()
        self.backbone = backbone
        self.sem_seg_head = sem_seg_head
        self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)

    @classmethod
    def from_config(cls, cfg):
        backbone = build_backbone(cfg)
        sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
        return {
            "backbone": backbone,
            "sem_seg_head": sem_seg_head,
            "pixel_mean": cfg.MODEL.PIXEL_MEAN,
            "pixel_std": cfg.MODEL.PIXEL_STD,
        }

    @property
    def device(self):
        return self.pixel_mean.device

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one image.

                For now, each item in the list is a dict that contains:

                * "image": Tensor, image in (C, H, W) format.
                * "sem_seg": semantic segmentation ground truth
                * Other information that's included in the original dicts, such as:
                  "height", "width" (int): the output resolution of the model (may be different
                  from input resolution), used in inference.

        Returns:
            list[dict]:
                Each dict is the output for one input image.
                The dict contains one key "sem_seg" whose value is a
                Tensor that represents the
                per-pixel segmentation predicted by the head.
                The prediction has shape KxHxW that represents the logits of
                each class for each pixel.
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)

        features = self.backbone(images.tensor)

        if "sem_seg" in batched_inputs[0]:
            targets = [x["sem_seg"].to(self.device) for x in batched_inputs]
            targets = ImageList.from_tensors(
                targets, self.backbone.size_divisibility, self.sem_seg_head.ignore_value
            ).tensor
        else:
            targets = None
        results, losses = self.sem_seg_head(features, targets)

        if self.training:
            return losses

        processed_results = []
        for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):
            height = input_per_image.get("height", image_size[0])
            width = input_per_image.get("width", image_size[1])
            r = sem_seg_postprocess(result, image_size, height, width)
            processed_results.append({"sem_seg": r})
        return processed_results


def build_sem_seg_head(cfg, input_shape):
    """
    Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`.
    """
    name = cfg.MODEL.SEM_SEG_HEAD.NAME
    return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)


@SEM_SEG_HEADS_REGISTRY.register()
class SemSegFPNHead(nn.Module):
    """
    A semantic segmentation head described in :paper:`PanopticFPN`.
    It takes a list of FPN features as input, and applies a sequence of
    3x3 convs and upsampling to scale all of them to the stride defined by
    ``common_stride``. Then these features are added and used to make final
    predictions by another 1x1 conv layer.
    """

    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        num_classes: int,
        conv_dims: int,
        common_stride: int,
        loss_weight: float = 1.0,
        norm: Optional[Union[str, Callable]] = None,
        ignore_value: int = -1,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            input_shape: shapes (channels and stride) of the input features
            num_classes: number of classes to predict
            conv_dims: number of output channels for the intermediate conv layers.
            common_stride: the common stride that all features will be upscaled to
            loss_weight: loss weight
            norm (str or callable): normalization for all conv layers
            ignore_value: category id to be ignored during training.
        """
        super().__init__()
        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        if not len(input_shape):
            raise ValueError("SemSegFPNHead(input_shape=) cannot be empty!")
        self.in_features = [k for k, v in input_shape]
        feature_strides = [v.stride for k, v in input_shape]
        feature_channels = [v.channels for k, v in input_shape]

        self.ignore_value = ignore_value
        self.common_stride = common_stride
        self.loss_weight = loss_weight

        self.scale_heads = []
        for in_feature, stride, channels in zip(
            self.in_features, feature_strides, feature_channels
        ):
            head_ops = []
            head_length = max(1, int(np.log2(stride) - np.log2(self.common_stride)))
            for k in range(head_length):
                norm_module = get_norm(norm, conv_dims)
                conv = Conv2d(
                    channels if k == 0 else conv_dims,
                    conv_dims,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=not norm,
                    norm=norm_module,
                    activation=F.relu,
                )
                weight_init.c2_msra_fill(conv)
                head_ops.append(conv)
                if stride != self.common_stride:
                    head_ops.append(
                        nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
                    )
            self.scale_heads.append(nn.Sequential(*head_ops))
            self.add_module(in_feature, self.scale_heads[-1])
        self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
        weight_init.c2_msra_fill(self.predictor)

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        return {
            "input_shape": {
                k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
            },
            "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
            "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
            "conv_dims": cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM,
            "common_stride": cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE,
            "norm": cfg.MODEL.SEM_SEG_HEAD.NORM,
            "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
        }

    def forward(self, features, targets=None):
        """
        Returns:
            In training, returns (None, dict of losses)
            In inference, returns (CxHxW logits, {})
        """
        x = self.layers(features)
        if self.training:
            return None, self.losses(x, targets)
        else:
            x = F.interpolate(
                x, scale_factor=self.common_stride, mode="bilinear", align_corners=False
            )
            return x, {}

    def layers(self, features):
        for i, f in enumerate(self.in_features):
            if i == 0:
                x = self.scale_heads[i](features[f])
            else:
                x = x + self.scale_heads[i](features[f])
        x = self.predictor(x)
        return x

    def losses(self, predictions, targets):
        predictions = predictions.float()  # https://github.com/pytorch/pytorch/issues/48163
        predictions = F.interpolate(
            predictions,
            scale_factor=self.common_stride,
            mode="bilinear",
            align_corners=False,
        )
        loss = F.cross_entropy(
            predictions, targets, reduction="mean", ignore_index=self.ignore_value
        )
        losses = {"loss_sem_seg": loss * self.loss_weight}
        return losses
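For orientation, the sketch below (not part of this commit) shows how SemSegFPNHead can be instantiated directly with explicit arguments and run on a dict of FPN-style features; the feature names, strides, image size, and class count are illustrative assumptions.

    # Hypothetical usage sketch of SemSegFPNHead; all shapes/names are made up for illustration.
    import torch
    from detectron2.layers import ShapeSpec
    from detectron2.modeling.meta_arch.semantic_seg import SemSegFPNHead

    input_shape = {
        "p2": ShapeSpec(channels=256, stride=4),
        "p3": ShapeSpec(channels=256, stride=8),
        "p4": ShapeSpec(channels=256, stride=16),
        "p5": ShapeSpec(channels=256, stride=32),
    }
    head = SemSegFPNHead(
        input_shape=input_shape, num_classes=54, conv_dims=128, common_stride=4, norm="GN"
    )
    head.eval()
    # Fake FPN features for a 256x256 image: one NCHW tensor per level.
    features = {k: torch.randn(1, 256, 256 // s.stride, 256 // s.stride) for k, s in input_shape.items()}
    logits, _ = head(features)  # (1, 54, 256, 256): per-pixel class logits at input resolution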
detectron2/modeling/mmdet_wrapper.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import logging
import numpy as np
from collections import OrderedDict
from collections.abc import Mapping
from typing import Dict, List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor, nn

from detectron2.layers import ShapeSpec
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage

from .backbone import Backbone

logger = logging.getLogger(__name__)


def _to_container(cfg):
    """
    mmdet will assert the type of dict/list.
    So convert omegaconf objects to dict/list.
    """
    if isinstance(cfg, DictConfig):
        cfg = OmegaConf.to_container(cfg, resolve=True)
    from mmcv.utils import ConfigDict

    return ConfigDict(cfg)


class MMDetBackbone(Backbone):
    """
    Wrapper of mmdetection backbones to use in detectron2.

    mmdet backbones produce list/tuple of tensors, while detectron2 backbones
    produce a dict of tensors. This class wraps the given backbone to produce
    output in detectron2's convention, so it can be used in place of detectron2
    backbones.
    """

    def __init__(
        self,
        backbone: Union[nn.Module, Mapping],
        neck: Union[nn.Module, Mapping, None] = None,
        *,
        output_shapes: List[ShapeSpec],
        output_names: Optional[List[str]] = None,
    ):
        """
        Args:
            backbone: either a backbone module or a mmdet config dict that defines a
                backbone. The backbone takes a 4D image tensor and returns a
                sequence of tensors.
            neck: either a backbone module or a mmdet config dict that defines a
                neck. The neck takes outputs of backbone and returns a
                sequence of tensors. If None, no neck is used.
            pretrained_backbone: defines the backbone weights that can be loaded by
                mmdet, such as "torchvision://resnet50".
            output_shapes: shape for every output of the backbone (or neck, if given).
                stride and channels are often needed.
            output_names: names for every output of the backbone (or neck, if given).
                By default, will use "out0", "out1", ...
        """
        super().__init__()
        if isinstance(backbone, Mapping):
            from mmdet.models import build_backbone

            backbone = build_backbone(_to_container(backbone))
        self.backbone = backbone

        if isinstance(neck, Mapping):
            from mmdet.models import build_neck

            neck = build_neck(_to_container(neck))
        self.neck = neck

        # "Neck" weights, if any, are part of neck itself. This is the interface
        # of mmdet so we follow it. Reference:
        # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py
        logger.info("Initializing mmdet backbone weights...")
        self.backbone.init_weights()
        # train() in mmdet modules is non-trivial, and has to be explicitly
        # called. Reference:
        # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py
        self.backbone.train()
        if self.neck is not None:
            logger.info("Initializing mmdet neck weights ...")
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
            self.neck.train()

        self._output_shapes = output_shapes
        if not output_names:
            output_names = [f"out{i}" for i in range(len(output_shapes))]
        self._output_names = output_names

    def forward(self, x) -> Dict[str, Tensor]:
        outs = self.backbone(x)
        if self.neck is not None:
            outs = self.neck(outs)
        assert isinstance(
            outs, (list, tuple)
        ), "mmdet backbone should return a list/tuple of tensors!"
        if len(outs) != len(self._output_shapes):
            raise ValueError(
                "Length of output_shapes does not match outputs from the mmdet backbone: "
                f"{len(outs)} != {len(self._output_shapes)}"
            )
        return {k: v for k, v in zip(self._output_names, outs)}

    def output_shape(self) -> Dict[str, ShapeSpec]:
        return {k: v for k, v in zip(self._output_names, self._output_shapes)}


class MMDetDetector(nn.Module):
    """
    Wrapper of a mmdetection detector model, for detection and instance segmentation.
    Input/output formats of this class follow detectron2's convention, so a
    mmdetection model can be trained and evaluated in detectron2.
    """

    def __init__(
        self,
        detector: Union[nn.Module, Mapping],
        *,
        # Default is 32 regardless of model:
        # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
        size_divisibility=32,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
    ):
        """
        Args:
            detector: a mmdet detector, or a mmdet config dict that defines a detector.
            size_divisibility: pad input images to multiple of this number
            pixel_mean: per-channel mean to normalize input image
            pixel_std: per-channel stddev to normalize input image
        """
        super().__init__()
        if isinstance(detector, Mapping):
            from mmdet.models import build_detector

            detector = build_detector(_to_container(detector))
        self.detector = detector
        self.size_divisibility = size_divisibility

        self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
        assert (
            self.pixel_mean.shape == self.pixel_std.shape
        ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"

    def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
        metas = []
        rescale = {"height" in x for x in batched_inputs}
        if len(rescale) != 1:
            raise ValueError("Some inputs have original height/width, but some don't!")
        rescale = list(rescale)[0]
        output_shapes = []
        for input in batched_inputs:
            meta = {}
            c, h, w = input["image"].shape
            meta["img_shape"] = meta["ori_shape"] = (h, w, c)
            if rescale:
                scale_factor = np.array(
                    [w / input["width"], h / input["height"]] * 2, dtype="float32"
                )
                ori_shape = (input["height"], input["width"])
                output_shapes.append(ori_shape)
                meta["ori_shape"] = ori_shape + (c,)
            else:
                scale_factor = 1.0
                output_shapes.append((h, w))
            meta["scale_factor"] = scale_factor
            meta["flip"] = False
            padh, padw = images.shape[-2:]
            meta["pad_shape"] = (padh, padw, c)
            metas.append(meta)

        if self.training:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
            if gt_instances[0].has("gt_masks"):
                from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks

                def convert_mask(m, shape):
                    # mmdet mask format
                    if isinstance(m, BitMasks):
                        return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
                    else:
                        return mm_PolygonMasks(m.polygons, shape[0], shape[1])

                gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
                losses_and_metrics = self.detector.forward_train(
                    images,
                    metas,
                    [x.gt_boxes.tensor for x in gt_instances],
                    [x.gt_classes for x in gt_instances],
                    gt_masks=gt_masks,
                )
            else:
                losses_and_metrics = self.detector.forward_train(
                    images,
                    metas,
                    [x.gt_boxes.tensor for x in gt_instances],
                    [x.gt_classes for x in gt_instances],
                )
            return _parse_losses(losses_and_metrics)
        else:
            results = self.detector.simple_test(images, metas, rescale=rescale)
            results = [
                {"instances": _convert_mmdet_result(r, shape)}
                for r, shape in zip(results, output_shapes)
            ]
            return results

    @property
    def device(self):
        return self.pixel_mean.device


# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
    if isinstance(result, tuple):
        bbox_result, segm_result = result
        if isinstance(segm_result, tuple):
            segm_result = segm_result[0]
    else:
        bbox_result, segm_result = result, None

    bboxes = torch.from_numpy(np.vstack(bbox_result))  # Nx5
    bboxes, scores = bboxes[:, :4], bboxes[:, -1]
    labels = [
        torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result)
    ]
    labels = torch.cat(labels)
    inst = Instances(shape)
    inst.pred_boxes = Boxes(bboxes)
    inst.scores = scores
    inst.pred_classes = labels

    if segm_result is not None and len(labels) > 0:
        segm_result = list(itertools.chain(*segm_result))
        segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result]
        segm_result = torch.stack(segm_result, dim=0)
        inst.pred_masks = segm_result
    return inst


# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]:
    log_vars = OrderedDict()
    for loss_name, loss_value in losses.items():
        if isinstance(loss_value, torch.Tensor):
            log_vars[loss_name] = loss_value.mean()
        elif isinstance(loss_value, list):
            log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
        else:
            raise TypeError(f"{loss_name} is not a tensor or list of tensors")

        if "loss" not in loss_name:
            # put metrics to storage; don't return them
            storage = get_event_storage()
            value = log_vars.pop(loss_name).cpu().item()
            storage.put_scalar(loss_name, value)
    return log_vars
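As a usage note, the sketch below (not part of this commit) wraps a standard mmdet ResNet-50 + FPN as a detectron2 backbone via config dicts. It assumes mmdet and mmcv are installed; the backbone/neck config keys follow the usual mmdet conventions and are illustrative.

    # Hypothetical sketch: build a detectron2-style backbone from mmdet config dicts.
    from detectron2.layers import ShapeSpec
    from detectron2.modeling.mmdet_wrapper import MMDetBackbone

    backbone = MMDetBackbone(
        backbone=dict(
            type="ResNet",
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            norm_cfg=dict(type="BN", requires_grad=True),
        ),
        neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
        # One ShapeSpec per neck output; strides assumed for a standard 5-level FPN.
        output_shapes=[ShapeSpec(channels=256, stride=s) for s in (4, 8, 16, 32, 64)],
        output_names=["p2", "p3", "p4", "p5", "p6"],
    )
    # backbone(images) then returns {"p2": ..., ..., "p6": ...} in detectron2's dict convention.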
detectron2/modeling/poolers.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import List
import torch
from torch import nn
from torchvision.ops import RoIPool

from detectron2.layers import ROIAlign, ROIAlignRotated, cat, nonzero_tuple, shapes_to_tensor
from detectron2.structures import Boxes

"""
To export ROIPooler to torchscript, in this file, variables that should be annotated with
`Union[List[Boxes], List[RotatedBoxes]]` are only annotated with `List[Boxes]`.

TODO: Correct these annotations when torchscript supports `Union`.
https://github.com/pytorch/pytorch/issues/41412
"""

__all__ = ["ROIPooler"]


def assign_boxes_to_levels(
    box_lists: List[Boxes],
    min_level: int,
    max_level: int,
    canonical_box_size: int,
    canonical_level: int,
):
    """
    Map each box in `box_lists` to a feature map level index and return the assignment
    vector.

    Args:
        box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes,
            where N is the number of images in the batch.
        min_level (int): Smallest feature map level index. The input is considered index 0,
            the output of stage 1 is index 1, and so on.
        max_level (int): Largest feature map level index.
        canonical_box_size (int): A canonical box size in pixels (sqrt(box area)).
        canonical_level (int): The feature map level index on which a canonically-sized box
            should be placed.

    Returns:
        A tensor of length M, where M is the total number of boxes aggregated over all
            N batch images. The memory layout corresponds to the concatenation of boxes
            from all images. Each element is the feature map index, as an offset from
            `self.min_level`, for the corresponding box (so value i means the box is at
            `self.min_level + i`).
    """
    box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists]))
    # Eqn.(1) in FPN paper
    level_assignments = torch.floor(
        canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)
    )
    # clamp level to (min, max), in case the box size is too large or too small
    # for the available feature maps
    level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level)
    return level_assignments.to(torch.int64) - min_level


def convert_boxes_to_pooler_format(box_lists: List[Boxes]):
    """
    Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops
    (see description under Returns).

    Args:
        box_lists (list[Boxes] | list[RotatedBoxes]):
            A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.

    Returns:
        When input is list[Boxes]:
            A tensor of shape (M, 5), where M is the total number of boxes aggregated over all
            N batch images.
            The 5 columns are (batch index, x0, y0, x1, y1), where batch index
            is the index in [0, N) identifying which batch image the box with corners at
            (x0, y0, x1, y1) comes from.
        When input is list[RotatedBoxes]:
            A tensor of shape (M, 6), where M is the total number of boxes aggregated over all
            N batch images.
            The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees),
            where batch index is the index in [0, N) identifying which batch image the
            rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from.
    """
    boxes = torch.cat([x.tensor for x in box_lists], dim=0)
    # __len__ returns Tensor in tracing.
    sizes = shapes_to_tensor([x.__len__() for x in box_lists], device=boxes.device)
    indices = torch.repeat_interleave(
        torch.arange(len(box_lists), dtype=boxes.dtype, device=boxes.device), sizes
    )
    return cat([indices[:, None], boxes], dim=1)


class ROIPooler(nn.Module):
    """
    Region of interest feature map pooler that supports pooling from one or more
    feature maps.
    """

    def __init__(
        self,
        output_size,
        scales,
        sampling_ratio,
        pooler_type,
        canonical_box_size=224,
        canonical_level=4,
    ):
        """
        Args:
            output_size (int, tuple[int] or list[int]): output size of the pooled region,
                e.g., 14 x 14. If tuple or list is given, the length must be 2.
            scales (list[float]): The scale for each low-level pooling op relative to
                the input image. For a feature map with stride s relative to the input
                image, scale is defined as 1/s. The stride must be power of 2.
                When there are multiple scales, they must form a pyramid, i.e. they must be
                a monotonically decreasing geometric sequence with a factor of 1/2.
            sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op.
            pooler_type (string): Name of the type of pooling operation that should be applied.
                For instance, "ROIPool" or "ROIAlignV2".
            canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default
                is heuristically defined as 224 pixels in the FPN paper (based on ImageNet
                pre-training).
            canonical_level (int): The feature map level index on which a canonically-sized box
                should be placed. The default is defined as level 4 (stride=16) in the FPN paper,
                i.e., a box of size 224x224 will be placed on the feature with stride=16.
                The box placement for all boxes will be determined from their sizes w.r.t
                canonical_box_size. For example, a box whose area is 4x that of a canonical box
                should be used to pool features from feature level ``canonical_level+1``.

                Note that the actual input feature maps given to this module may not have
                sufficiently many levels for the input boxes. If the boxes are too large or too
                small for the input feature maps, the closest level will be used.
        """
        super().__init__()

        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert len(output_size) == 2
        assert isinstance(output_size[0], int) and isinstance(output_size[1], int)
        self.output_size = output_size

        if pooler_type == "ROIAlign":
            self.level_poolers = nn.ModuleList(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False
                )
                for scale in scales
            )
        elif pooler_type == "ROIAlignV2":
            self.level_poolers = nn.ModuleList(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True
                )
                for scale in scales
            )
        elif pooler_type == "ROIPool":
            self.level_poolers = nn.ModuleList(
                RoIPool(output_size, spatial_scale=scale) for scale in scales
            )
        elif pooler_type == "ROIAlignRotated":
            self.level_poolers = nn.ModuleList(
                ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)
                for scale in scales
            )
        else:
            raise ValueError("Unknown pooler type: {}".format(pooler_type))

        # Map scale (defined as 1 / stride) to its feature map level under the
        # assumption that stride is a power of 2.
        min_level = -(math.log2(scales[0]))
        max_level = -(math.log2(scales[-1]))
        assert math.isclose(min_level, int(min_level)) and math.isclose(
            max_level, int(max_level)
        ), "Featuremap stride is not power of 2!"
        self.min_level = int(min_level)
        self.max_level = int(max_level)
        assert (
            len(scales) == self.max_level - self.min_level + 1
        ), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!"
        assert 0 <= self.min_level and self.min_level <= self.max_level
        self.canonical_level = canonical_level
        assert canonical_box_size > 0
        self.canonical_box_size = canonical_box_size

    def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):
        """
        Args:
            x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those
                used to construct this module.
            box_lists (list[Boxes] | list[RotatedBoxes]):
                A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.
                The box coordinates are defined on the original image and
                will be scaled by the `scales` argument of :class:`ROIPooler`.

        Returns:
            Tensor:
                A tensor of shape (M, C, output_size, output_size) where M is the total number of
                boxes aggregated over all N batch images and C is the number of channels in `x`.
        """
        num_level_assignments = len(self.level_poolers)

        assert isinstance(x, list) and isinstance(
            box_lists, list
        ), "Arguments to pooler must be lists"
        assert (
            len(x) == num_level_assignments
        ), "unequal value, num_level_assignments={}, but x is list of {} Tensors".format(
            num_level_assignments, len(x)
        )

        assert len(box_lists) == x[0].size(
            0
        ), "unequal value, x[0] batch dim 0 is {}, but box_list has length {}".format(
            x[0].size(0), len(box_lists)
        )
        if len(box_lists) == 0:
            return torch.zeros(
                (0, x[0].shape[1]) + self.output_size, device=x[0].device, dtype=x[0].dtype
            )

        pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)

        if num_level_assignments == 1:
            return self.level_poolers[0](x[0], pooler_fmt_boxes)

        level_assignments = assign_boxes_to_levels(
            box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level
        )

        num_boxes = pooler_fmt_boxes.size(0)
        num_channels = x[0].shape[1]
        output_size = self.output_size[0]

        dtype, device = x[0].dtype, x[0].device
        output = torch.zeros(
            (num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device
        )

        for level, pooler in enumerate(self.level_poolers):
            inds = nonzero_tuple(level_assignments == level)[0]
            pooler_fmt_boxes_level = pooler_fmt_boxes[inds]
            # Use index_put_ instead of advance indexing, to avoid pytorch/issues/49852
            output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))

        return output
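A small sketch of ROIPooler on a two-level pyramid follows (not part of this commit); the feature shapes, strides, and box coordinates are made-up values for illustration only.

    # Hypothetical sketch: pool 7x7 crops for two boxes from a tiny two-level pyramid.
    import torch
    from detectron2.structures import Boxes
    from detectron2.modeling.poolers import ROIPooler

    pooler = ROIPooler(
        output_size=7,
        scales=(1.0 / 8, 1.0 / 16),   # strides 8 and 16 -> levels 3 and 4
        sampling_ratio=0,
        pooler_type="ROIAlignV2",
    )
    features = [torch.randn(1, 256, 32, 32), torch.randn(1, 256, 16, 16)]  # one image
    boxes = [Boxes(torch.tensor([[10.0, 10.0, 80.0, 90.0], [4.0, 4.0, 24.0, 20.0]]))]
    out = pooler(features, boxes)   # (2, 256, 7, 7): one 7x7 crop per box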
detectron2/modeling/postprocessing.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F

from detectron2.structures import Instances, ROIMasks


# perhaps should rename to "resize_instance"
def detector_postprocess(
    results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5
):
    """
    Resize the output instances.
    The input images are often resized when entering an object detector.
    As a result, we often need the outputs of the detector in a different
    resolution from its inputs.

    This function will resize the raw outputs of an R-CNN detector
    to produce outputs according to the desired output resolution.

    Args:
        results (Instances): the raw outputs from the detector.
            `results.image_size` contains the input image resolution the detector sees.
            This object might be modified in-place.
        output_height, output_width: the desired output resolution.

    Returns:
        Instances: the resized output from the model, based on the output resolution
    """
    # Change to 'if is_tracing' after PT1.7
    if isinstance(output_height, torch.Tensor):
        # Converts integer tensors to float temporaries to ensure true
        # division is performed when computing scale_x and scale_y.
        output_width_tmp = output_width.float()
        output_height_tmp = output_height.float()
        new_size = torch.stack([output_height, output_width])
    else:
        new_size = (output_height, output_width)
        output_width_tmp = output_width
        output_height_tmp = output_height

    scale_x, scale_y = (
        output_width_tmp / results.image_size[1],
        output_height_tmp / results.image_size[0],
    )
    results = Instances(new_size, **results.get_fields())

    if results.has("pred_boxes"):
        output_boxes = results.pred_boxes
    elif results.has("proposal_boxes"):
        output_boxes = results.proposal_boxes
    else:
        output_boxes = None
    assert output_boxes is not None, "Predictions must contain boxes!"

    output_boxes.scale(scale_x, scale_y)
    output_boxes.clip(results.image_size)

    results = results[output_boxes.nonempty()]

    if results.has("pred_masks"):
        if isinstance(results.pred_masks, ROIMasks):
            roi_masks = results.pred_masks
        else:
            # pred_masks is a tensor of shape (N, 1, M, M)
            roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
        results.pred_masks = roi_masks.to_bitmasks(
            results.pred_boxes, output_height, output_width, mask_threshold
        ).tensor  # TODO return ROIMasks/BitMask object in the future

    if results.has("pred_keypoints"):
        results.pred_keypoints[:, :, 0] *= scale_x
        results.pred_keypoints[:, :, 1] *= scale_y

    return results


def sem_seg_postprocess(result, img_size, output_height, output_width):
    """
    Return semantic segmentation predictions in the original resolution.

    The input images are often resized when entering the semantic segmentor. Moreover, in some
    cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
    As a result, we often need the predictions of the segmentor in a different
    resolution from its inputs.

    Args:
        result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
            where C is the number of classes, and H, W are the height and width of the prediction.
        img_size (tuple): image size that segmentor is taking as input.
        output_height, output_width: the desired output resolution.

    Returns:
        semantic segmentation prediction (Tensor): A tensor of the shape
            (C, output_height, output_width) that contains per-pixel soft predictions.
    """
    result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
    result = F.interpolate(
        result, size=(output_height, output_width), mode="bilinear", align_corners=False
    )[0]
    return result
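A quick sketch of sem_seg_postprocess with made-up sizes (not part of this commit): a padded 20-class prediction is cropped back to the unpadded input size and rescaled to the requested output resolution.

    # Hypothetical sketch: all sizes below are illustrative.
    import torch
    from detectron2.modeling.postprocessing import sem_seg_postprocess

    logits = torch.randn(20, 512, 512)   # padded network output (C, H, W)
    out = sem_seg_postprocess(logits, img_size=(480, 500), output_height=960, output_width=1000)
    print(out.shape)                     # torch.Size([20, 960, 1000])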
detectron2/modeling/proposal_generator/__init__.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates.
from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator
from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead

__all__ = list(globals().keys())
detectron2/modeling/proposal_generator/build.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.utils.registry import Registry

PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR")
PROPOSAL_GENERATOR_REGISTRY.__doc__ = """
Registry for proposal generator, which produces object proposals from feature maps.

The registered object will be called with `obj(cfg, input_shape)`.
The call should return a `nn.Module` object.
"""

from . import rpn, rrpn  # noqa F401 isort:skip


def build_proposal_generator(cfg, input_shape):
    """
    Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`.
    The name can be "PrecomputedProposals" to use no proposal generator.
    """
    name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
    if name == "PrecomputedProposals":
        return None

    return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape)
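A minimal sketch of how this registry is meant to be used: register a custom generator class, then point cfg.MODEL.PROPOSAL_GENERATOR.NAME at it. MyProposalGenerator is a hypothetical name, not something defined in this commit.

    # Hypothetical registration sketch for the PROPOSAL_GENERATOR registry.
    from torch import nn
    from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY

    @PROPOSAL_GENERATOR_REGISTRY.register()
    class MyProposalGenerator(nn.Module):   # hypothetical class
        def __init__(self, cfg, input_shape):
            super().__init__()
            # build the module from cfg and input_shape here

        def forward(self, images, features, gt_instances=None):
            # expected to return (list[Instances] proposals, dict of losses)
            raise NotImplementedError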
detectron2/modeling/proposal_generator/proposal_utils.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import math
from typing import List, Tuple, Union
import torch

from detectron2.layers import batched_nms, cat
from detectron2.structures import Boxes, Instances

logger = logging.getLogger(__name__)


def _is_tracing():
    # (fixed in TORCH_VERSION >= 1.9)
    if torch.jit.is_scripting():
        # https://github.com/pytorch/pytorch/issues/47379
        return False
    else:
        return torch.jit.is_tracing()


def find_top_rpn_proposals(
    proposals: List[torch.Tensor],
    pred_objectness_logits: List[torch.Tensor],
    image_sizes: List[Tuple[int, int]],
    nms_thresh: float,
    pre_nms_topk: int,
    post_nms_topk: int,
    min_box_size: float,
    training: bool,
):
    """
    For each feature map, select the `pre_nms_topk` highest scoring proposals,
    apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
    highest scoring proposals among all the feature maps for each image.

    Args:
        proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4).
            All proposal predictions on the feature maps.
        pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
        image_sizes (list[tuple]): sizes (h, w) for each image
        nms_thresh (float): IoU threshold to use for NMS
        pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
            When RPN is run on multiple feature maps (as in FPN) this number is per
            feature map.
        post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
            When RPN is run on multiple feature maps (as in FPN) this number is total,
            over all feature maps.
        min_box_size (float): minimum proposal box side length in pixels (absolute units
            wrt input images).
        training (bool): True if proposals are to be used in training, otherwise False.
            This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
            comment.

    Returns:
        list[Instances]: list of N Instances. The i-th Instances
            stores post_nms_topk object proposals for image i, sorted by their
            objectness score in descending order.
    """
    num_images = len(image_sizes)
    device = proposals[0].device

    # 1. Select top-k anchor for every level and every image
    topk_scores = []  # #lvl Tensor, each of shape N x topk
    topk_proposals = []
    level_ids = []  # #lvl Tensor, each of shape (topk,)
    batch_idx = torch.arange(num_images, device=device)
    for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)):
        Hi_Wi_A = logits_i.shape[1]
        if isinstance(Hi_Wi_A, torch.Tensor):  # it's a tensor in tracing
            num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk)
        else:
            num_proposals_i = min(Hi_Wi_A, pre_nms_topk)

        # sort is faster than topk: https://github.com/pytorch/pytorch/issues/22812
        # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
        logits_i, idx = logits_i.sort(descending=True, dim=1)
        topk_scores_i = logits_i.narrow(1, 0, num_proposals_i)
        topk_idx = idx.narrow(1, 0, num_proposals_i)

        # each is N x topk
        topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 4

        topk_proposals.append(topk_proposals_i)
        topk_scores.append(topk_scores_i)
        level_ids.append(
            torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)
        )

    # 2. Concat all levels together
    topk_scores = cat(topk_scores, dim=1)
    topk_proposals = cat(topk_proposals, dim=1)
    level_ids = cat(level_ids, dim=0)

    # 3. For each image, run a per-level NMS, and choose topk results.
    results: List[Instances] = []
    for n, image_size in enumerate(image_sizes):
        boxes = Boxes(topk_proposals[n])
        scores_per_img = topk_scores[n]
        lvl = level_ids

        valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
        if not valid_mask.all():
            if training:
                raise FloatingPointError(
                    "Predicted boxes or scores contain Inf/NaN. Training has diverged."
                )
            boxes = boxes[valid_mask]
            scores_per_img = scores_per_img[valid_mask]
            lvl = lvl[valid_mask]
        boxes.clip(image_size)

        # filter empty boxes
        keep = boxes.nonempty(threshold=min_box_size)
        if _is_tracing() or keep.sum().item() != len(boxes):
            boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]

        keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh)
        # In Detectron1, there was different behavior during training vs. testing.
        # (https://github.com/facebookresearch/Detectron/issues/459)
        # During training, topk is over the proposals from *all* images in the training batch.
        # During testing, it is over the proposals for each image separately.
        # As a result, the training behavior becomes batch-dependent,
        # and the configuration "POST_NMS_TOPK_TRAIN" ends up relying on the batch size.
        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
        keep = keep[:post_nms_topk]  # keep is already sorted

        res = Instances(image_size)
        res.proposal_boxes = boxes[keep]
        res.objectness_logits = scores_per_img[keep]
        results.append(res)
    return results


def add_ground_truth_to_proposals(
    gt: Union[List[Instances], List[Boxes]], proposals: List[Instances]
) -> List[Instances]:
    """
    Call `add_ground_truth_to_proposals_single_image` for all images.

    Args:
        gt (Union[List[Instances], List[Boxes]]): list of N elements. Element i is an Instances
            representing the ground-truth for image i.
        proposals (list[Instances]): list of N elements. Element i is an Instances
            representing the proposals for image i.

    Returns:
        list[Instances]: list of N Instances. Each is the proposals for the image,
            with field "proposal_boxes" and "objectness_logits".
    """
    assert gt is not None

    if len(proposals) != len(gt):
        raise ValueError("proposals and gt should have the same length as the number of images!")
    if len(proposals) == 0:
        return proposals

    return [
        add_ground_truth_to_proposals_single_image(gt_i, proposals_i)
        for gt_i, proposals_i in zip(gt, proposals)
    ]


def add_ground_truth_to_proposals_single_image(
    gt: Union[Instances, Boxes], proposals: Instances
) -> Instances:
    """
    Augment `proposals` with `gt`.

    Args:
        Same as `add_ground_truth_to_proposals`, but with gt and proposals
        per image.

    Returns:
        Same as `add_ground_truth_to_proposals`, but for only one image.
    """
    if isinstance(gt, Boxes):
        # convert Boxes to Instances
        gt = Instances(proposals.image_size, gt_boxes=gt)

    gt_boxes = gt.gt_boxes
    device = proposals.objectness_logits.device
    # Assign all ground-truth boxes an objectness logit corresponding to
    # P(object) = sigmoid(logit) =~ 1.
    gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
    gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device)

    # Concatenating gt_boxes with proposals requires them to have the same fields
    gt_proposal = Instances(proposals.image_size, **gt.get_fields())
    gt_proposal.proposal_boxes = gt_boxes
    gt_proposal.objectness_logits = gt_logits

    for key in proposals.get_fields().keys():
        assert gt_proposal.has(
            key
        ), "The attribute '{}' in `proposals` does not exist in `gt`".format(key)

    # NOTE: Instances.cat only use fields from the first item. Extra fields in latter items
    # will be thrown away.
    new_proposals = Instances.cat([proposals, gt_proposal])

    return new_proposals
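A toy sketch of add_ground_truth_to_proposals (not part of this commit): ground-truth boxes are appended to the proposal list with a near-certain objectness logit. All numbers are illustrative.

    # Hypothetical sketch: one proposal plus one GT box for a 480x640 image.
    import torch
    from detectron2.structures import Boxes, Instances
    from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals

    proposals = Instances((480, 640))
    proposals.proposal_boxes = Boxes(torch.tensor([[0.0, 0.0, 100.0, 100.0]]))
    proposals.objectness_logits = torch.tensor([2.5])

    gt = Instances((480, 640))
    gt.gt_boxes = Boxes(torch.tensor([[50.0, 60.0, 200.0, 220.0]]))

    merged = add_ground_truth_to_proposals([gt], [proposals])[0]
    print(len(merged))                   # 2: the original proposal plus the GT box
    print(merged.objectness_logits[-1])  # ~23.0, i.e. sigmoid(logit) =~ 1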
detectron2/modeling/proposal_generator/rpn.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn

from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, cat
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.memory import retry_if_cuda_oom
from detectron2.utils.registry import Registry

from ..anchor_generator import build_anchor_generator
from ..box_regression import Box2BoxTransform, _dense_box_regression_loss
from ..matcher import Matcher
from ..sampling import subsample_labels
from .build import PROPOSAL_GENERATOR_REGISTRY
from .proposal_utils import find_top_rpn_proposals

RPN_HEAD_REGISTRY = Registry("RPN_HEAD")
RPN_HEAD_REGISTRY.__doc__ = """
Registry for RPN heads, which take feature maps and perform
objectness classification and bounding box regression for anchors.

The registered object will be called with `obj(cfg, input_shape)`.
The call should return a `nn.Module` object.
"""


"""
Shape shorthand in this module:

    N: number of images in the minibatch
    L: number of feature maps per image on which RPN is run
    A: number of cell anchors (must be the same for all feature maps)
    Hi, Wi: height and width of the i-th feature map
    B: size of the box parameterization

Naming convention:

    objectness: refers to the binary classification of an anchor as object vs. not object.

    deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
    transform (see :class:`box_regression.Box2BoxTransform`), or 5d for rotated boxes.

    pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use
        sigmoid(pred_objectness_logits) to estimate P(object).

    gt_labels: ground-truth binary classification labels for objectness

    pred_anchor_deltas: predicted box2box transform deltas

    gt_anchor_deltas: ground-truth box2box transform deltas
"""


def build_rpn_head(cfg, input_shape):
    """
    Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`.
    """
    name = cfg.MODEL.RPN.HEAD_NAME
    return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape)


@RPN_HEAD_REGISTRY.register()
class StandardRPNHead(nn.Module):
    """
    Standard RPN classification and regression heads described in :paper:`Faster R-CNN`.
    Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts
    objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas
    specifying how to deform each anchor into an object proposal.
    """

    @configurable
    def __init__(
        self, *, in_channels: int, num_anchors: int, box_dim: int = 4, conv_dims: List[int] = (-1,)
    ):
        """
        NOTE: this interface is experimental.

        Args:
            in_channels (int): number of input feature channels. When using multiple
                input features, they must have the same number of channels.
            num_anchors (int): number of anchors to predict for *each spatial position*
                on the feature map. The total number of anchors for each
                feature map will be `num_anchors * H * W`.
            box_dim (int): dimension of a box, which is also the number of box regression
                predictions to make for each anchor. An axis aligned box has
                box_dim=4, while a rotated box has box_dim=5.
            conv_dims (list[int]): a list of integers representing the output channels
                of N conv layers. Set it to -1 to use the same number of output channels
                as input channels.
        """
        super().__init__()
        cur_channels = in_channels
        # Keeping the old variable names and structure for backwards compatibility.
        # Otherwise the old checkpoints will fail to load.
        if len(conv_dims) == 1:
            out_channels = cur_channels if conv_dims[0] == -1 else conv_dims[0]
            # 3x3 conv for the hidden representation
            self.conv = self._get_rpn_conv(cur_channels, out_channels)
            cur_channels = out_channels
        else:
            self.conv = nn.Sequential()
            for k, conv_dim in enumerate(conv_dims):
                out_channels = cur_channels if conv_dim == -1 else conv_dim
                if out_channels <= 0:
                    raise ValueError(
                        f"Conv output channels should be greater than 0. Got {out_channels}"
                    )
                conv = self._get_rpn_conv(cur_channels, out_channels)
                self.conv.add_module(f"conv{k}", conv)
                cur_channels = out_channels
        # 1x1 conv for predicting objectness logits
        self.objectness_logits = nn.Conv2d(cur_channels, num_anchors, kernel_size=1, stride=1)
        # 1x1 conv for predicting box2box transform deltas
        self.anchor_deltas = nn.Conv2d(
            cur_channels, num_anchors * box_dim, kernel_size=1, stride=1
        )

        # Keeping the order of weights initialization same for backwards compatibility.
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                nn.init.normal_(layer.weight, std=0.01)
                nn.init.constant_(layer.bias, 0)

    def _get_rpn_conv(self, in_channels, out_channels):
        return Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            activation=nn.ReLU(),
        )

    @classmethod
    def from_config(cls, cfg, input_shape):
        # Standard RPN is shared across levels:
        in_channels = [s.channels for s in input_shape]
        assert len(set(in_channels)) == 1, "Each level must have the same channel!"
        in_channels = in_channels[0]

        # RPNHead should take the same input as anchor generator
        # NOTE: it assumes that creating an anchor generator does not have unwanted side effect.
        anchor_generator = build_anchor_generator(cfg, input_shape)
        num_anchors = anchor_generator.num_anchors
        box_dim = anchor_generator.box_dim
        assert (
            len(set(num_anchors)) == 1
        ), "Each level must have the same number of anchors per spatial position"
        return {
            "in_channels": in_channels,
            "num_anchors": num_anchors[0],
            "box_dim": box_dim,
            "conv_dims": cfg.MODEL.RPN.CONV_DIMS,
        }

    def forward(self, features: List[torch.Tensor]):
        """
        Args:
            features (list[Tensor]): list of feature maps

        Returns:
            list[Tensor]: A list of L elements.
                Element i is a tensor of shape (N, A, Hi, Wi) representing
                the predicted objectness logits for all anchors. A is the number of cell anchors.
            list[Tensor]: A list of L elements. Element i is a tensor of shape
                (N, A*box_dim, Hi, Wi) representing the predicted "deltas" used to transform anchors
                to proposals.
        """
        pred_objectness_logits = []
        pred_anchor_deltas = []
        for x in features:
            t = self.conv(x)
            pred_objectness_logits.append(self.objectness_logits(t))
            pred_anchor_deltas.append(self.anchor_deltas(t))
        return pred_objectness_logits, pred_anchor_deltas


@PROPOSAL_GENERATOR_REGISTRY.register()
class RPN(nn.Module):
    """
    Region Proposal Network, introduced by :paper:`Faster R-CNN`.
    """

    @configurable
    def __init__(
        self,
        *,
        in_features: List[str],
        head: nn.Module,
        anchor_generator: nn.Module,
        anchor_matcher: Matcher,
        box2box_transform: Box2BoxTransform,
        batch_size_per_image: int,
        positive_fraction: float,
        pre_nms_topk: Tuple[float, float],
        post_nms_topk: Tuple[float, float],
        nms_thresh: float = 0.7,
        min_box_size: float = 0.0,
        anchor_boundary_thresh: float = -1.0,
        loss_weight: Union[float, Dict[str, float]] = 1.0,
        box_reg_loss_type: str = "smooth_l1",
        smooth_l1_beta: float = 0.0,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            in_features (list[str]): list of names of input features to use
            head (nn.Module): a module that predicts logits and regression deltas
                for each level from a list of per-level features
            anchor_generator (nn.Module): a module that creates anchors from a
                list of features. Usually an instance of :class:`AnchorGenerator`
            anchor_matcher (Matcher): label the anchors by matching them with ground truth.
            box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to
                instance boxes
            batch_size_per_image (int): number of anchors per image to sample for training
            positive_fraction (float): fraction of foreground anchors to sample for training
            pre_nms_topk (tuple[float]): (train, test) that represents the
                number of top k proposals to select before NMS, in
                training and testing.
            post_nms_topk (tuple[float]): (train, test) that represents the
                number of top k proposals to select after NMS, in
                training and testing.
            nms_thresh (float): NMS threshold used to de-duplicate the predicted proposals
            min_box_size (float): remove proposal boxes with any side smaller than this threshold,
                in the unit of input image pixels
            anchor_boundary_thresh (float): legacy option
            loss_weight (float|dict): weights to use for losses. Can be single float for weighting
                all rpn losses together, or a dict of individual weightings. Valid dict keys are:
                    "loss_rpn_cls" - applied to classification loss
                    "loss_rpn_loc" - applied to box regression loss
            box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou".
            smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to
                use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
        """
        super().__init__()
        self.in_features = in_features
        self.rpn_head = head
        self.anchor_generator = anchor_generator
        self.anchor_matcher = anchor_matcher
        self.box2box_transform = box2box_transform
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction
        # Map from self.training state to train/test settings
        self.pre_nms_topk = {True: pre_nms_topk[0], False: pre_nms_topk[1]}
        self.post_nms_topk = {True: post_nms_topk[0], False: post_nms_topk[1]}
        self.nms_thresh = nms_thresh
        self.min_box_size = float(min_box_size)
        self.anchor_boundary_thresh = anchor_boundary_thresh
        if isinstance(loss_weight, float):
            loss_weight = {"loss_rpn_cls": loss_weight, "loss_rpn_loc": loss_weight}
        self.loss_weight = loss_weight
        self.box_reg_loss_type = box_reg_loss_type
        self.smooth_l1_beta = smooth_l1_beta

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        in_features = cfg.MODEL.RPN.IN_FEATURES
        ret = {
            "in_features": in_features,
            "min_box_size": cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE,
            "nms_thresh": cfg.MODEL.RPN.NMS_THRESH,
            "batch_size_per_image": cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE,
            "positive_fraction": cfg.MODEL.RPN.POSITIVE_FRACTION,
            "loss_weight": {
                "loss_rpn_cls": cfg.MODEL.RPN.LOSS_WEIGHT,
                "loss_rpn_loc": cfg.MODEL.RPN.BBOX_REG_LOSS_WEIGHT * cfg.MODEL.RPN.LOSS_WEIGHT,
            },
            "anchor_boundary_thresh": cfg.MODEL.RPN.BOUNDARY_THRESH,
            "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS),
            "box_reg_loss_type": cfg.MODEL.RPN.BBOX_REG_LOSS_TYPE,
            "smooth_l1_beta": cfg.MODEL.RPN.SMOOTH_L1_BETA,
        }

        ret["pre_nms_topk"] = (cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, cfg.MODEL.RPN.PRE_NMS_TOPK_TEST)
        ret["post_nms_topk"] = (cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, cfg.MODEL.RPN.POST_NMS_TOPK_TEST)

        ret["anchor_generator"] = build_anchor_generator(cfg, [input_shape[f] for f in in_features])
        ret["anchor_matcher"] = Matcher(
            cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True
        )
        ret["head"] = build_rpn_head(cfg, [input_shape[f] for f in in_features])
        return ret

    def _subsample_labels(self, label):
        """
        Randomly sample a subset of positive and negative examples, and overwrite
        the label vector to the ignore value (-1) for all elements that are not
        included in the sample.

        Args:
            labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned.
        """
        pos_idx, neg_idx = subsample_labels(
            label, self.batch_size_per_image, self.positive_fraction, 0
        )
        # Fill with the ignore label (-1), then set positive and negative labels
        label.fill_(-1)
        label.scatter_(0, pos_idx, 1)
        label.scatter_(0, neg_idx, 0)
        return label

    @torch.jit.unused
    @torch.no_grad()
    def label_and_sample_anchors(
        self, anchors: List[Boxes], gt_instances: List[Instances]
    ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
        """
        Args:
            anchors (list[Boxes]): anchors for each feature map.
            gt_instances: the ground-truth instances for each image.

        Returns:
            list[Tensor]:
                List of #img tensors. i-th element is a vector of labels whose length is
                the total number of anchors across all feature maps R = sum(Hi * Wi * A).
                Label values are in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative
                class; 1 = positive class.
            list[Tensor]:
                i-th element is a Rx4 tensor. The values are the matched gt boxes for each
                anchor. Values are undefined for those anchors not labeled as 1.
        """
        anchors = Boxes.cat(anchors)

        gt_boxes = [x.gt_boxes for x in gt_instances]
        image_sizes = [x.image_size for x in gt_instances]
        del gt_instances

        gt_labels = []
        matched_gt_boxes = []
        for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes):
            """
            image_size_i: (h, w) for the i-th image
            gt_boxes_i: ground-truth boxes for i-th image
            """

            match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors)
            matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
            # Matching is memory-expensive and may result in CPU tensors. But the result is small
            gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
            del match_quality_matrix

            if self.anchor_boundary_thresh >= 0:
                # Discard anchors that go out of the boundaries of the image
                # NOTE: This is legacy functionality that is turned off by default in Detectron2
                anchors_inside_image = anchors.inside_box(image_size_i, self.anchor_boundary_thresh)
                gt_labels_i[~anchors_inside_image] = -1

            # A vector of labels (-1, 0, 1) for each anchor
            gt_labels_i = self._subsample_labels(gt_labels_i)

            if len(gt_boxes_i) == 0:
                # These values won't be used anyway since the anchor is labeled as background
                matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
            else:
                # TODO wasted indexing computation for ignored boxes
                matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor

            gt_labels.append(gt_labels_i)  # N,AHW
            matched_gt_boxes.append(matched_gt_boxes_i)
        return gt_labels, matched_gt_boxes

    @torch.jit.unused
    def losses(
        self,
        anchors: List[Boxes],
        pred_objectness_logits: List[torch.Tensor],
        gt_labels: List[torch.Tensor],
        pred_anchor_deltas: List[torch.Tensor],
        gt_boxes: List[torch.Tensor],
    ) -> Dict[str, torch.Tensor]:
        """
        Return the losses from a set of RPN predictions and their associated ground-truth.

        Args:
            anchors (list[Boxes or RotatedBoxes]): anchors for each feature map, each
                has shape (Hi*Wi*A, B), where B is box dimension (4 or 5).
            pred_objectness_logits (list[Tensor]): A list of L elements.
                Element i is a tensor of shape (N, Hi*Wi*A) representing
                the predicted objectness logits for all anchors.
            gt_labels (list[Tensor]): Output of :meth:`label_and_sample_anchors`.
            pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
                (N, Hi*Wi*A, 4 or 5) representing the predicted "deltas" used to transform anchors
                to proposals.
            gt_boxes (list[Tensor]): Output of :meth:`label_and_sample_anchors`.

        Returns:
            dict[loss name -> loss value]: A dict mapping from loss name to loss value.
                Loss names are: `loss_rpn_cls` for objectness classification and
                `loss_rpn_loc` for proposal localization.
        """
        num_images = len(gt_labels)
        gt_labels = torch.stack(gt_labels)  # (N, sum(Hi*Wi*Ai))

        # Log the number of positive/negative anchors per-image that's used in training
        pos_mask = gt_labels == 1
        num_pos_anchors = pos_mask.sum().item()
        num_neg_anchors = (gt_labels == 0).sum().item()
        storage = get_event_storage()
        storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / num_images)
        storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / num_images)

        localization_loss = _dense_box_regression_loss(
            anchors,
            self.box2box_transform,
            pred_anchor_deltas,
            gt_boxes,
            pos_mask,
            box_reg_loss_type=self.box_reg_loss_type,
            smooth_l1_beta=self.smooth_l1_beta,
        )

        valid_mask = gt_labels >= 0
        objectness_loss = F.binary_cross_entropy_with_logits(
            cat(pred_objectness_logits, dim=1)[valid_mask],
            gt_labels[valid_mask].to(torch.float32),
            reduction="sum",
        )
        normalizer = self.batch_size_per_image * num_images
        losses = {
            "loss_rpn_cls": objectness_loss / normalizer,
            # The original Faster R-CNN paper uses a slightly different normalizer
            # for loc loss. But it doesn't matter in practice
            "loss_rpn_loc": localization_loss / normalizer,
        }
        losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
        return losses

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        gt_instances: Optional[List[Instances]] = None,
    ):
        """
        Args:
            images (ImageList): input images of length `N`
            features (dict[str, Tensor]): input data as a mapping from feature
                map name to tensor. Axis 0 represents the number of images `N` in
                the input data; axes 1-3 are channels, height, and width, which may
                vary between feature maps (e.g., if a feature pyramid is used).
            gt_instances (list[Instances], optional): a length `N` list of `Instances`s.
                Each `Instances` stores ground-truth instances for the corresponding image.

        Returns:
            proposals: list[Instances]: contains fields "proposal_boxes", "objectness_logits"
            loss: dict[Tensor] or None
        """
        features = [features[f] for f in self.in_features]
        anchors = self.anchor_generator(features)

        pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
        # Transpose the Hi*Wi*A dimension to the middle:
        pred_objectness_logits = [
            # (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
            score.permute(0, 2, 3, 1).flatten(1)
            for score in pred_objectness_logits
        ]
        pred_anchor_deltas = [
            # (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B)
            x.view(x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1])
            .permute(0, 3, 4, 1, 2)
            .flatten(1, -2)
            for x in pred_anchor_deltas
        ]
]
if
self
.
training
:
assert
gt_instances
is
not
None
,
"RPN requires gt_instances in training!"
gt_labels
,
gt_boxes
=
self
.
label_and_sample_anchors
(
anchors
,
gt_instances
)
losses
=
self
.
losses
(
anchors
,
pred_objectness_logits
,
gt_labels
,
pred_anchor_deltas
,
gt_boxes
)
else
:
losses
=
{}
proposals
=
self
.
predict_proposals
(
anchors
,
pred_objectness_logits
,
pred_anchor_deltas
,
images
.
image_sizes
)
return
proposals
,
losses
def
predict_proposals
(
self
,
anchors
:
List
[
Boxes
],
pred_objectness_logits
:
List
[
torch
.
Tensor
],
pred_anchor_deltas
:
List
[
torch
.
Tensor
],
image_sizes
:
List
[
Tuple
[
int
,
int
]],
):
"""
Decode all the predicted box regression deltas to proposals. Find the top proposals
by applying NMS and removing boxes that are too small.
Returns:
proposals (list[Instances]): list of N Instances. The i-th Instances
stores post_nms_topk object proposals for image i, sorted by their
objectness score in descending order.
"""
# The proposals are treated as fixed for joint training with roi heads.
# This approach ignores the derivative w.r.t. the proposal boxes’ coordinates that
# are also network responses.
with
torch
.
no_grad
():
pred_proposals
=
self
.
_decode_proposals
(
anchors
,
pred_anchor_deltas
)
return
find_top_rpn_proposals
(
pred_proposals
,
pred_objectness_logits
,
image_sizes
,
self
.
nms_thresh
,
self
.
pre_nms_topk
[
self
.
training
],
self
.
post_nms_topk
[
self
.
training
],
self
.
min_box_size
,
self
.
training
,
)
def
_decode_proposals
(
self
,
anchors
:
List
[
Boxes
],
pred_anchor_deltas
:
List
[
torch
.
Tensor
]):
"""
Transform anchors into proposals by applying the predicted anchor deltas.
Returns:
proposals (list[Tensor]): A list of L tensors. Tensor i has shape
(N, Hi*Wi*A, B)
"""
N
=
pred_anchor_deltas
[
0
].
shape
[
0
]
proposals
=
[]
# For each feature map
for
anchors_i
,
pred_anchor_deltas_i
in
zip
(
anchors
,
pred_anchor_deltas
):
B
=
anchors_i
.
tensor
.
size
(
1
)
pred_anchor_deltas_i
=
pred_anchor_deltas_i
.
reshape
(
-
1
,
B
)
# Expand anchors to shape (N*Hi*Wi*A, B)
anchors_i
=
anchors_i
.
tensor
.
unsqueeze
(
0
).
expand
(
N
,
-
1
,
-
1
).
reshape
(
-
1
,
B
)
proposals_i
=
self
.
box2box_transform
.
apply_deltas
(
pred_anchor_deltas_i
,
anchors_i
)
# Append feature map proposals with shape (N, Hi*Wi*A, B)
proposals
.
append
(
proposals_i
.
view
(
N
,
-
1
,
B
))
return
proposals
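# --- Illustrative sketch (not part of the commit above) ---
# A minimal, self-contained example of the (N, A, Hi, Wi) -> (N, Hi*Wi*A) reshaping that
# RPN.forward applies to each objectness-logit map before computing losses and proposals.
# The sizes below (N=2, A=3, Hi=4, Wi=5) are made-up example values.
import torch

score = torch.randn(2, 3, 4, 5)              # (N, A, Hi, Wi) as produced by the RPN head
flat = score.permute(0, 2, 3, 1).flatten(1)  # (N, Hi, Wi, A) -> (N, Hi*Wi*A)
assert flat.shape == (2, 4 * 5 * 3)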
detectron2/modeling/proposal_generator/rrpn.py
0 → 100644
View file @
b634945d
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import logging
from typing import Dict, List
import torch

from detectron2.config import configurable
from detectron2.layers import ShapeSpec, batched_nms_rotated, cat
from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
from detectron2.utils.memory import retry_if_cuda_oom

from ..box_regression import Box2BoxTransformRotated
from .build import PROPOSAL_GENERATOR_REGISTRY
from .proposal_utils import _is_tracing
from .rpn import RPN

logger = logging.getLogger(__name__)


def find_top_rrpn_proposals(
    proposals,
    pred_objectness_logits,
    image_sizes,
    nms_thresh,
    pre_nms_topk,
    post_nms_topk,
    min_box_size,
    training,
):
    """
    For each feature map, select the `pre_nms_topk` highest scoring proposals,
    apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
    highest scoring proposals among all the feature maps if `training` is True,
    otherwise, returns the highest `post_nms_topk` scoring proposals for each
    feature map.
    Args:
        proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5).
            All proposal predictions on the feature maps.
        pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
        image_sizes (list[tuple]): sizes (h, w) for each image
        nms_thresh (float): IoU threshold to use for NMS
        pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
            When RRPN is run on multiple feature maps (as in FPN) this number is per
            feature map.
        post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
            When RRPN is run on multiple feature maps (as in FPN) this number is total,
            over all feature maps.
        min_box_size(float): minimum proposal box side length in pixels (absolute units wrt
            input images).
        training (bool): True if proposals are to be used in training, otherwise False.
            This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
            comment.
    Returns:
        proposals (list[Instances]): list of N Instances. The i-th Instances
            stores post_nms_topk object proposals for image i.
    """
    num_images = len(image_sizes)
    device = proposals[0].device

    # 1. Select top-k anchor for every level and every image
    topk_scores = []  # #lvl Tensor, each of shape N x topk
    topk_proposals = []
    level_ids = []  # #lvl Tensor, each of shape (topk,)
    batch_idx = torch.arange(num_images, device=device)
    for level_id, proposals_i, logits_i in zip(
        itertools.count(), proposals, pred_objectness_logits
    ):
        Hi_Wi_A = logits_i.shape[1]
        if isinstance(Hi_Wi_A, torch.Tensor):  # it's a tensor in tracing
            num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk)
        else:
            num_proposals_i = min(Hi_Wi_A, pre_nms_topk)

        # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812)
        # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
        logits_i, idx = logits_i.sort(descending=True, dim=1)
        topk_scores_i = logits_i[batch_idx, :num_proposals_i]
        topk_idx = idx[batch_idx, :num_proposals_i]

        # each is N x topk
        topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 5

        topk_proposals.append(topk_proposals_i)
        topk_scores.append(topk_scores_i)
        level_ids.append(
            torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)
        )

    # 2. Concat all levels together
    topk_scores = cat(topk_scores, dim=1)
    topk_proposals = cat(topk_proposals, dim=1)
    level_ids = cat(level_ids, dim=0)

    # 3. For each image, run a per-level NMS, and choose topk results.
    results = []
    for n, image_size in enumerate(image_sizes):
        boxes = RotatedBoxes(topk_proposals[n])
        scores_per_img = topk_scores[n]
        valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
        if not valid_mask.all():
            boxes = boxes[valid_mask]
            scores_per_img = scores_per_img[valid_mask]
        boxes.clip(image_size)

        # filter empty boxes
        keep = boxes.nonempty(threshold=min_box_size)
        lvl = level_ids
        if _is_tracing() or keep.sum().item() != len(boxes):
            boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], level_ids[keep])

        keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh)
        # In Detectron1, there was different behavior during training vs. testing.
        # (https://github.com/facebookresearch/Detectron/issues/459)
        # During training, topk is over the proposals from *all* images in the training batch.
        # During testing, it is over the proposals for each image separately.
        # As a result, the training behavior becomes batch-dependent,
        # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size.
        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
        keep = keep[:post_nms_topk]

        res = Instances(image_size)
        res.proposal_boxes = boxes[keep]
        res.objectness_logits = scores_per_img[keep]
        results.append(res)
    return results


@PROPOSAL_GENERATOR_REGISTRY.register()
class RRPN(RPN):
    """
    Rotated Region Proposal Network described in :paper:`RRPN`.
    """

    @configurable
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.anchor_boundary_thresh >= 0:
            raise NotImplementedError(
                "anchor_boundary_thresh is a legacy option not implemented for RRPN."
            )

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        ret = super().from_config(cfg, input_shape)
        ret["box2box_transform"] = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
        return ret

    @torch.no_grad()
    def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]):
        """
        Args:
            anchors (list[RotatedBoxes]): anchors for each feature map.
            gt_instances: the ground-truth instances for each image.
        Returns:
            list[Tensor]:
                List of #img tensors. i-th element is a vector of labels whose length is
                the total number of anchors across feature maps. Label values are in {-1, 0, 1},
                with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
            list[Tensor]:
                i-th element is a Nx5 tensor, where N is the total number of anchors across
                feature maps. The values are the matched gt boxes for each anchor.
                Values are undefined for those anchors not labeled as 1.
        """
        anchors = RotatedBoxes.cat(anchors)

        gt_boxes = [x.gt_boxes for x in gt_instances]
        del gt_instances

        gt_labels = []
        matched_gt_boxes = []
        for gt_boxes_i in gt_boxes:
            """
            gt_boxes_i: ground-truth boxes for i-th image
            """
            match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors)
            matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
            # Matching is memory-expensive and may result in CPU tensors. But the result is small
            gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)

            # A vector of labels (-1, 0, 1) for each anchor
            gt_labels_i = self._subsample_labels(gt_labels_i)

            if len(gt_boxes_i) == 0:
                # These values won't be used anyway since the anchor is labeled as background
                matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
            else:
                # TODO wasted indexing computation for ignored boxes
                matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor

            gt_labels.append(gt_labels_i)  # N,AHW
            matched_gt_boxes.append(matched_gt_boxes_i)
        return gt_labels, matched_gt_boxes

    @torch.no_grad()
    def predict_proposals(self, anchors, pred_objectness_logits, pred_anchor_deltas, image_sizes):
        pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
        return find_top_rrpn_proposals(
            pred_proposals,
            pred_objectness_logits,
            image_sizes,
            self.nms_thresh,
            self.pre_nms_topk[self.training],
            self.post_nms_topk[self.training],
            self.min_box_size,
            self.training,
        )
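# --- Illustrative sketch (not part of the commit above) ---
# find_top_rrpn_proposals picks the pre-NMS top-k per level by sorting rather than topk()
# (sort is faster for large inputs). A pure-PyTorch sketch of that step with made-up sizes:
import torch

logits = torch.randn(2, 1000)                         # (N, Hi*Wi*A) objectness logits for one level
num_keep = min(logits.shape[1], 256)                  # pre_nms_topk for this level
sorted_logits, idx = logits.sort(descending=True, dim=1)
topk_scores, topk_idx = sorted_logits[:, :num_keep], idx[:, :num_keep]  # each is N x topk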
detectron2/modeling/roi_heads/__init__.py
0 → 100644
View file @
b634945d
# Copyright (c) Facebook, Inc. and its affiliates.
from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head, FastRCNNConvFCHead
from .keypoint_head import (
    ROI_KEYPOINT_HEAD_REGISTRY,
    build_keypoint_head,
    BaseKeypointRCNNHead,
    KRCNNConvDeconvUpsampleHead,
)
from .mask_head import (
    ROI_MASK_HEAD_REGISTRY,
    build_mask_head,
    BaseMaskRCNNHead,
    MaskRCNNConvUpsampleHead,
)
from .roi_heads import (
    ROI_HEADS_REGISTRY,
    ROIHeads,
    Res5ROIHeads,
    StandardROIHeads,
    build_roi_heads,
    select_foreground_proposals,
)
from .cascade_rcnn import CascadeROIHeads
from .rotated_fast_rcnn import RROIHeads
from .fast_rcnn import FastRCNNOutputLayers

from . import cascade_rcnn  # isort:skip

__all__ = list(globals().keys())
detectron2/modeling/roi_heads/box_head.py
0 → 100644
View file @
b634945d
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import List
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn

from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.utils.registry import Registry

__all__ = ["FastRCNNConvFCHead", "build_box_head", "ROI_BOX_HEAD_REGISTRY"]

ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD")
ROI_BOX_HEAD_REGISTRY.__doc__ = """
Registry for box heads, which make box predictions from per-region features.
The registered object will be called with `obj(cfg, input_shape)`.
"""


# To get torchscript support, we make the head a subclass of `nn.Sequential`.
# Therefore, to add new layers in this head class, please make sure they are
# added in the order they will be used in forward().
@ROI_BOX_HEAD_REGISTRY.register()
class FastRCNNConvFCHead(nn.Sequential):
    """
    A head with several 3x3 conv layers (each followed by norm & relu) and then
    several fc layers (each followed by relu).
    """

    @configurable
    def __init__(
        self, input_shape: ShapeSpec, *, conv_dims: List[int], fc_dims: List[int], conv_norm=""
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape (ShapeSpec): shape of the input feature.
            conv_dims (list[int]): the output dimensions of the conv layers
            fc_dims (list[int]): the output dimensions of the fc layers
            conv_norm (str or callable): normalization for the conv layers.
                See :func:`detectron2.layers.get_norm` for supported types.
        """
        super().__init__()
        assert len(conv_dims) + len(fc_dims) > 0

        self._output_size = (input_shape.channels, input_shape.height, input_shape.width)

        self.conv_norm_relus = []
        for k, conv_dim in enumerate(conv_dims):
            conv = Conv2d(
                self._output_size[0],
                conv_dim,
                kernel_size=3,
                padding=1,
                bias=not conv_norm,
                norm=get_norm(conv_norm, conv_dim),
                activation=nn.ReLU(),
            )
            self.add_module("conv{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (conv_dim, self._output_size[1], self._output_size[2])

        self.fcs = []
        for k, fc_dim in enumerate(fc_dims):
            if k == 0:
                self.add_module("flatten", nn.Flatten())
            fc = nn.Linear(int(np.prod(self._output_size)), fc_dim)
            self.add_module("fc{}".format(k + 1), fc)
            self.add_module("fc_relu{}".format(k + 1), nn.ReLU())
            self.fcs.append(fc)
            self._output_size = fc_dim

        for layer in self.conv_norm_relus:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)

    @classmethod
    def from_config(cls, cfg, input_shape):
        num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV
        conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM
        num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
        fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
        return {
            "input_shape": input_shape,
            "conv_dims": [conv_dim] * num_conv,
            "fc_dims": [fc_dim] * num_fc,
            "conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM,
        }

    def forward(self, x):
        for layer in self:
            x = layer(x)
        return x

    @property
    @torch.jit.unused
    def output_shape(self):
        """
        Returns:
            ShapeSpec: the output feature shape
        """
        o = self._output_size
        if isinstance(o, int):
            return ShapeSpec(channels=o)
        else:
            return ShapeSpec(channels=o[0], height=o[1], width=o[2])


def build_box_head(cfg, input_shape):
    """
    Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`.
    """
    name = cfg.MODEL.ROI_BOX_HEAD.NAME
    return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape)
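# --- Illustrative sketch (not part of the commit above) ---
# FastRCNNConvFCHead can also be built without a config object by passing explicit arguments
# to the @configurable constructor. The channel and fc sizes below are made-up example values;
# this assumes detectron2 (and fvcore) are importable in the environment.
import torch
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads import FastRCNNConvFCHead

head = FastRCNNConvFCHead(
    input_shape=ShapeSpec(channels=256, height=7, width=7),
    conv_dims=[],
    fc_dims=[1024, 1024],
)
features = torch.randn(8, 256, 7, 7)  # 8 pooled region features
out = head(features)                  # shape (8, 1024); head.output_shape.channels == 1024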
detectron2/modeling/roi_heads/cascade_rcnn.py
0 → 100644
View file @
b634945d
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List
import torch
from torch import nn
from torch.autograd.function import Function

from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage

from ..box_regression import Box2BoxTransform
from ..matcher import Matcher
from ..poolers import ROIPooler
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers, fast_rcnn_inference
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads


class _ScaleGradient(Function):
    @staticmethod
    def forward(ctx, input, scale):
        ctx.scale = scale
        return input

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output * ctx.scale, None


@ROI_HEADS_REGISTRY.register()
class CascadeROIHeads(StandardROIHeads):
    """
    The ROI heads that implement :paper:`Cascade R-CNN`.
    """

    @configurable
    def __init__(
        self,
        *,
        box_in_features: List[str],
        box_pooler: ROIPooler,
        box_heads: List[nn.Module],
        box_predictors: List[nn.Module],
        proposal_matchers: List[Matcher],
        **kwargs,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            box_pooler (ROIPooler): pooler that extracts region features from given boxes
            box_heads (list[nn.Module]): box head for each cascade stage
            box_predictors (list[nn.Module]): box predictor for each cascade stage
            proposal_matchers (list[Matcher]): matcher with different IoU thresholds to
                match boxes with ground truth for each stage. The first matcher matches
                RPN proposals with ground truth, the other matchers use boxes predicted
                by the previous stage as proposals and match them with ground truth.
        """
        assert "proposal_matcher" not in kwargs, (
            "CascadeROIHeads takes 'proposal_matchers=' for each stage instead "
            "of one 'proposal_matcher='."
        )
        # The first matcher matches RPN proposals with ground truth, done in the base class
        kwargs["proposal_matcher"] = proposal_matchers[0]
        num_stages = self.num_cascade_stages = len(box_heads)
        box_heads = nn.ModuleList(box_heads)
        box_predictors = nn.ModuleList(box_predictors)
        assert len(box_predictors) == num_stages, f"{len(box_predictors)} != {num_stages}!"
        assert len(proposal_matchers) == num_stages, f"{len(proposal_matchers)} != {num_stages}!"
        super().__init__(
            box_in_features=box_in_features,
            box_pooler=box_pooler,
            box_head=box_heads,
            box_predictor=box_predictors,
            **kwargs,
        )
        self.proposal_matchers = proposal_matchers

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        ret.pop("proposal_matcher")
        return ret

    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        # fmt: off
        in_features              = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution        = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales            = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio           = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type              = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
        cascade_ious             = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS
        assert len(cascade_bbox_reg_weights) == len(cascade_ious)
        assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,  \
            "CascadeROIHeads only support class-agnostic regression now!"
        assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0]
        # fmt: on

        in_channels = [input_shape[f].channels for f in in_features]
        # Check all channel counts are equal
        assert len(set(in_channels)) == 1, in_channels
        in_channels = in_channels[0]

        box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        pooled_shape = ShapeSpec(
            channels=in_channels, width=pooler_resolution, height=pooler_resolution
        )

        box_heads, box_predictors, proposal_matchers = [], [], []
        for match_iou, bbox_reg_weights in zip(cascade_ious, cascade_bbox_reg_weights):
            box_head = build_box_head(cfg, pooled_shape)
            box_heads.append(box_head)
            box_predictors.append(
                FastRCNNOutputLayers(
                    cfg,
                    box_head.output_shape,
                    box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),
                )
            )
            proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False))
        return {
            "box_in_features": in_features,
            "box_pooler": box_pooler,
            "box_heads": box_heads,
            "box_predictors": box_predictors,
            "proposal_matchers": proposal_matchers,
        }

    def forward(self, images, features, proposals, targets=None):
        del images
        if self.training:
            proposals = self.label_and_sample_proposals(proposals, targets)

        if self.training:
            # Need targets to box head
            losses = self._forward_box(features, proposals, targets)
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return proposals, losses
        else:
            pred_instances = self._forward_box(features, proposals)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}

    def _forward_box(self, features, proposals, targets=None):
        """
        Args:
            features, targets: the same as in
                Same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
                Each has fields "proposal_boxes", and "objectness_logits",
                "gt_classes", "gt_boxes".
        """
        features = [features[f] for f in self.box_in_features]
        head_outputs = []  # (predictor, predictions, proposals)
        prev_pred_boxes = None
        image_sizes = [x.image_size for x in proposals]
        for k in range(self.num_cascade_stages):
            if k > 0:
                # The output boxes of the previous stage are used to create the input
                # proposals of the next stage.
                proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes)
                if self.training:
                    proposals = self._match_and_label_boxes(proposals, k, targets)
            predictions = self._run_stage(features, proposals, k)
            prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals)
            head_outputs.append((self.box_predictor[k], predictions, proposals))

        if self.training:
            losses = {}
            storage = get_event_storage()
            for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
                with storage.name_scope("stage{}".format(stage)):
                    stage_losses = predictor.losses(predictions, proposals)
                losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()})
            return losses
        else:
            # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)
            scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]

            # Average the scores across heads
            scores = [
                sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
                for scores_per_image in zip(*scores_per_stage)
            ]
            # Use the boxes of the last head
            predictor, predictions, proposals = head_outputs[-1]
            boxes = predictor.predict_boxes(predictions, proposals)
            pred_instances, _ = fast_rcnn_inference(
                boxes,
                scores,
                image_sizes,
                predictor.test_score_thresh,
                predictor.test_nms_thresh,
                predictor.test_topk_per_image,
            )
            return pred_instances

    @torch.no_grad()
    def _match_and_label_boxes(self, proposals, stage, targets):
        """
        Match proposals with groundtruth using the matcher at the given stage.
        Label the proposals as foreground or background based on the match.
        Args:
            proposals (list[Instances]): One Instances for each image, with
                the field "proposal_boxes".
            stage (int): the current stage
            targets (list[Instances]): the ground truth instances
        Returns:
            list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes"
        """
        num_fg_samples, num_bg_samples = [], []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            match_quality_matrix = pairwise_iou(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            # proposal_labels are 0 or 1
            matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix)
            if len(targets_per_image) > 0:
                gt_classes = targets_per_image.gt_classes[matched_idxs]
                # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
                gt_classes[proposal_labels == 0] = self.num_classes
                gt_boxes = targets_per_image.gt_boxes[matched_idxs]
            else:
                gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
                gt_boxes = Boxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4))
                )
            proposals_per_image.gt_classes = gt_classes
            proposals_per_image.gt_boxes = gt_boxes

            num_fg_samples.append((proposal_labels == 1).sum().item())
            num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1])

        # Log the number of fg/bg samples in each stage
        storage = get_event_storage()
        storage.put_scalar(
            "stage{}/roi_head/num_fg_samples".format(stage),
            sum(num_fg_samples) / len(num_fg_samples),
        )
        storage.put_scalar(
            "stage{}/roi_head/num_bg_samples".format(stage),
            sum(num_bg_samples) / len(num_bg_samples),
        )
        return proposals

    def _run_stage(self, features, proposals, stage):
        """
        Args:
            features (list[Tensor]): #lvl input features to ROIHeads
            proposals (list[Instances]): #image Instances, with the field "proposal_boxes"
            stage (int): the current stage
        Returns:
            Same output as `FastRCNNOutputLayers.forward()`.
        """
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        # The original implementation averages the losses among heads,
        # but scale up the parameter gradients of the heads.
        # This is equivalent to adding the losses among heads,
        # but scale down the gradients on features.
        box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages)
        box_features = self.box_head[stage](box_features)
        return self.box_predictor[stage](box_features)

    def _create_proposals_from_boxes(self, boxes, image_sizes):
        """
        Args:
            boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4
            image_sizes (list[tuple]): list of image shapes in (h, w)
        Returns:
            list[Instances]: per-image proposals with the given boxes.
        """
        # Just like RPN, the proposals should not have gradients
        boxes = [Boxes(b.detach()) for b in boxes]
        proposals = []
        for boxes_per_image, image_size in zip(boxes, image_sizes):
            boxes_per_image.clip(image_size)
            if self.training:
                # do not filter empty boxes at inference time,
                # because the scores from each stage need to be aligned and added later
                boxes_per_image = boxes_per_image[boxes_per_image.nonempty()]
            prop = Instances(image_size)
            prop.proposal_boxes = boxes_per_image
            proposals.append(prop)
        return proposals
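# --- Illustrative sketch (not part of the commit above) ---
# _ScaleGradient is an identity in the forward pass and multiplies the incoming gradient by
# `scale` in the backward pass; CascadeROIHeads uses it to split feature gradients evenly
# across its stages. A tiny demonstration, assuming the module above is importable:
import torch
from detectron2.modeling.roi_heads.cascade_rcnn import _ScaleGradient

x = torch.ones(4, requires_grad=True)
y = _ScaleGradient.apply(x, 1.0 / 3)  # values unchanged in the forward pass
y.sum().backward()
print(x.grad)                         # every entry is 1/3 instead of 1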
detectron2/modeling/roi_heads/fast_rcnn.py
0 → 100644
View file @
b634945d
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import Dict, List, Tuple, Union
import torch
from fvcore.nn import giou_loss, smooth_l1_loss
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import (
    ShapeSpec,
    batched_nms,
    cat,
    ciou_loss,
    cross_entropy,
    diou_loss,
    nonzero_tuple,
)
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.structures import Boxes, Instances
from detectron2.utils.events import get_event_storage

__all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers"]


logger = logging.getLogger(__name__)

"""
Shape shorthand in this module:
    N: number of images in the minibatch
    R: number of ROIs, combined over all images, in the minibatch
    Ri: number of ROIs in image i
    K: number of foreground classes. E.g.,there are 80 foreground classes in COCO.
Naming convention:
    deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
    transform (see :class:`box_regression.Box2BoxTransform`).
    pred_class_logits: predicted class scores in [-inf, +inf]; use
        softmax(pred_class_logits) to estimate P(class).
    gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
        foreground object classes and K represents the background class.
    pred_proposal_deltas: predicted box2box transform deltas for transforming proposals
        to detection box predictions.
    gt_proposal_deltas: ground-truth box2box transform deltas
"""


def fast_rcnn_inference(
    boxes: List[torch.Tensor],
    scores: List[torch.Tensor],
    image_shapes: List[Tuple[int, int]],
    score_thresh: float,
    nms_thresh: float,
    topk_per_image: int,
):
    """
    Call `fast_rcnn_inference_single_image` for all images.
    Args:
        boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
            boxes for each image. Element i has shape (Ri, K * 4) if doing
            class-specific regression, or (Ri, 4) if doing class-agnostic
            regression, where Ri is the number of predicted objects for image i.
            This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
        scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
            Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
            for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
        image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
        score_thresh (float): Only return detections with a confidence score exceeding this
            threshold.
        nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
        topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
            all detections.
    Returns:
        instances: (list[Instances]): A list of N instances, one for each image in the batch,
            that stores the topk most confidence detections.
        kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
            the corresponding boxes/scores index in [0, Ri) from the input, for image i.
    """
    result_per_image = [
        fast_rcnn_inference_single_image(
            boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
        )
        for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
    ]
    return [x[0] for x in result_per_image], [x[1] for x in result_per_image]


def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn"):
    """
    Log the classification metrics to EventStorage.
    Args:
        pred_logits: Rx(K+1) logits. The last column is for background class.
        gt_classes: R labels
    """
    num_instances = gt_classes.numel()
    if num_instances == 0:
        return
    pred_classes = pred_logits.argmax(dim=1)
    bg_class_ind = pred_logits.shape[1] - 1

    fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind)
    num_fg = fg_inds.nonzero().numel()
    fg_gt_classes = gt_classes[fg_inds]
    fg_pred_classes = pred_classes[fg_inds]

    num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()
    num_accurate = (pred_classes == gt_classes).nonzero().numel()
    fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()

    storage = get_event_storage()
    storage.put_scalar(f"{prefix}/cls_accuracy", num_accurate / num_instances)
    if num_fg > 0:
        storage.put_scalar(f"{prefix}/fg_cls_accuracy", fg_num_accurate / num_fg)
        storage.put_scalar(f"{prefix}/false_negative", num_false_negative / num_fg)


def fast_rcnn_inference_single_image(
    boxes,
    scores,
    image_shape: Tuple[int, int],
    score_thresh: float,
    nms_thresh: float,
    topk_per_image: int,
):
    """
    Single-image inference. Return bounding-box detection results by thresholding
    on scores and applying non-maximum suppression (NMS).
    Args:
        Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
        per image.
    Returns:
        Same as `fast_rcnn_inference`, but for only one image.
    """
    valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
    if not valid_mask.all():
        boxes = boxes[valid_mask]
        scores = scores[valid_mask]

    scores = scores[:, :-1]
    num_bbox_reg_classes = boxes.shape[1] // 4
    # Convert to Boxes to use the `clip` function ...
    boxes = Boxes(boxes.reshape(-1, 4))
    boxes.clip(image_shape)
    boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4)  # R x C x 4

    # 1. Filter results based on detection scores. It can make NMS more efficient
    #    by filtering out low-confidence detections.
    filter_mask = scores > score_thresh  # R x K
    # R' x 2. First column contains indices of the R predictions;
    # Second column contains indices of classes.
    filter_inds = filter_mask.nonzero()
    if num_bbox_reg_classes == 1:
        boxes = boxes[filter_inds[:, 0], 0]
    else:
        boxes = boxes[filter_mask]
    scores = scores[filter_mask]

    # 2. Apply NMS for each class independently.
    keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
    if topk_per_image >= 0:
        keep = keep[:topk_per_image]
    boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]

    result = Instances(image_shape)
    result.pred_boxes = Boxes(boxes)
    result.scores = scores
    result.pred_classes = filter_inds[:, 1]
    return result, filter_inds[:, 0]


class FastRCNNOutputLayers(nn.Module):
    """
    Two linear layers for predicting Fast R-CNN outputs:
    1. proposal-to-detection box regression deltas
    2. classification scores
    """

    @configurable
    def __init__(
        self,
        input_shape: ShapeSpec,
        *,
        box2box_transform,
        num_classes: int,
        test_score_thresh: float = 0.0,
        test_nms_thresh: float = 0.5,
        test_topk_per_image: int = 100,
        cls_agnostic_bbox_reg: bool = False,
        smooth_l1_beta: float = 0.0,
        box_reg_loss_type: str = "smooth_l1",
        loss_weight: Union[float, Dict[str, float]] = 1.0,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape (ShapeSpec): shape of the input feature to this module
            box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
            num_classes (int): number of foreground classes
            test_score_thresh (float): threshold to filter predictions results.
            test_nms_thresh (float): NMS threshold for prediction results.
            test_topk_per_image (int): number of top predictions to produce per image.
            cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
            smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if
                `box_reg_loss_type` is "smooth_l1"
            box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou",
                "diou", "ciou"
            loss_weight (float|dict): weights to use for losses. Can be single float for weighting
                all losses, or a dict of individual weightings. Valid dict keys are:
                    * "loss_cls": applied to classification loss
                    * "loss_box_reg": applied to box regression loss
        """
        super().__init__()
        if isinstance(input_shape, int):  # some backward compatibility
            input_shape = ShapeSpec(channels=input_shape)
        self.num_classes = num_classes
        input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)
        # prediction layer for num_classes foreground classes and one background class (hence + 1)
        self.cls_score = nn.Linear(input_size, num_classes + 1)
        num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
        box_dim = len(box2box_transform.weights)
        self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)

        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for l in [self.cls_score, self.bbox_pred]:
            nn.init.constant_(l.bias, 0)

        self.box2box_transform = box2box_transform
        self.smooth_l1_beta = smooth_l1_beta
        self.test_score_thresh = test_score_thresh
        self.test_nms_thresh = test_nms_thresh
        self.test_topk_per_image = test_topk_per_image
        self.box_reg_loss_type = box_reg_loss_type
        if isinstance(loss_weight, float):
            loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight}
        self.loss_weight = loss_weight

    @classmethod
    def from_config(cls, cfg, input_shape):
        return {
            "input_shape": input_shape,
            "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
            # fmt: off
            "num_classes"           : cfg.MODEL.ROI_HEADS.NUM_CLASSES,
            "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
            "smooth_l1_beta"        : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
            "test_score_thresh"     : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
            "test_nms_thresh"       : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
            "test_topk_per_image"   : cfg.TEST.DETECTIONS_PER_IMAGE,
            "box_reg_loss_type"     : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,
            "loss_weight"           : {"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT},
            # fmt: on
        }

    def forward(self, x):
        """
        Args:
            x: per-region features of shape (N, ...) for N bounding boxes to predict.
        Returns:
            (Tensor, Tensor):
            First tensor: shape (N,K+1), scores for each of the N box. Each row contains the
            scores for K object categories and 1 background class.
            Second tensor: bounding box regression deltas for each box. Shape is shape (N,Kx4),
            or (N,4) for class-agnostic regression.
        """
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = self.cls_score(x)
        proposal_deltas = self.bbox_pred(x)
        return scores, proposal_deltas

    def losses(self, predictions, proposals):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were used
                to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,
                ``gt_classes`` are expected.
        Returns:
            Dict[str, Tensor]: dict of losses
        """
        scores, proposal_deltas = predictions

        # parse classification outputs
        gt_classes = (
            cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
        )
        _log_classification_stats(scores, gt_classes)

        # parse box regression outputs
        if len(proposals):
            proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)  # Nx4
            assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
            # If "gt_boxes" does not exist, the proposals must be all negative and
            # should not be included in regression loss computation.
            # Here we just use proposal_boxes as an arbitrary placeholder because its
            # value won't be used in self.box_reg_loss().
            gt_boxes = cat(
                [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
                dim=0,
            )
        else:
            proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)

        losses = {
            "loss_cls": cross_entropy(scores, gt_classes, reduction="mean"),
            "loss_box_reg": self.box_reg_loss(
                proposal_boxes, gt_boxes, proposal_deltas, gt_classes
            ),
        }
        return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}

    def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):
        """
        Args:
            All boxes are tensors with the same shape Rx(4 or 5).
            gt_classes is a long tensor of shape R, the gt class label of each proposal.
            R shall be the number of proposals.
        """
        box_dim = proposal_boxes.shape[1]  # 4 or 5
        # Regression loss is only computed for foreground proposals (those matched to a GT)
        fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]
        if pred_deltas.shape[1] == box_dim:  # cls-agnostic regression
            fg_pred_deltas = pred_deltas[fg_inds]
        else:
            fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
                fg_inds, gt_classes[fg_inds]
            ]

        if self.box_reg_loss_type == "smooth_l1":
            gt_pred_deltas = self.box2box_transform.get_deltas(
                proposal_boxes[fg_inds],
                gt_boxes[fg_inds],
            )
            loss_box_reg = smooth_l1_loss(
                fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum"
            )
        elif self.box_reg_loss_type == "giou":
            fg_pred_boxes = self.box2box_transform.apply_deltas(
                fg_pred_deltas, proposal_boxes[fg_inds]
            )
            loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
        elif self.box_reg_loss_type == "diou":
            fg_pred_boxes = self.box2box_transform.apply_deltas(
                fg_pred_deltas, proposal_boxes[fg_inds]
            )
            loss_box_reg = diou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
        elif self.box_reg_loss_type == "ciou":
            fg_pred_boxes = self.box2box_transform.apply_deltas(
                fg_pred_deltas, proposal_boxes[fg_inds]
            )
            loss_box_reg = ciou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
        else:
            raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
        # The reg loss is normalized using the total number of regions (R), not the number
        # of foreground regions even though the box regression loss is only defined on
        # foreground regions. Why? Because doing so gives equal training influence to
        # each foreground example. To see how, consider two different minibatches:
        #  (1) Contains a single foreground region
        #  (2) Contains 100 foreground regions
        # If we normalize by the number of foreground regions, the single example in
        # minibatch (1) will be given 100 times as much influence as each foreground
        # example in minibatch (2). Normalizing by the total number of regions, R,
        # means that the single example in minibatch (1) and each of the 100 examples
        # in minibatch (2) are given equal influence.
        return loss_box_reg / max(gt_classes.numel(), 1.0)  # return 0 if empty

    def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were
                used to compute predictions. The ``proposal_boxes`` field is expected.
        Returns:
            list[Instances]: same as `fast_rcnn_inference`.
            list[Tensor]: same as `fast_rcnn_inference`.
        """
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        image_shapes = [x.image_size for x in proposals]
        return fast_rcnn_inference(
            boxes,
            scores,
            image_shapes,
            self.test_score_thresh,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )

    def predict_boxes_for_gt_classes(self, predictions, proposals):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were used
                to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.
        Returns:
            list[Tensor]:
                A list of Tensors of predicted boxes for GT classes in case of
                class-specific box head. Element i of the list has shape (Ri, B), where Ri is
                the number of proposals for image i and B is the box dimension (4 or 5)
        """
        if not len(proposals):
            return []
        scores, proposal_deltas = predictions
        proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
        N, B = proposal_boxes.shape
        predict_boxes = self.box2box_transform.apply_deltas(
            proposal_deltas, proposal_boxes
        )  # Nx(KxB)

        K = predict_boxes.shape[1] // B
        if K > 1:
            gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
            # Some proposals are ignored or have a background class. Their gt_classes
            # cannot be used as index.
            gt_classes = gt_classes.clamp_(0, K - 1)

            predict_boxes = predict_boxes.view(N, K, B)[
                torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes
            ]
        num_prop_per_image = [len(p) for p in proposals]
        return predict_boxes.split(num_prop_per_image)

    def predict_boxes(
        self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
    ):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were
                used to compute predictions. The ``proposal_boxes`` field is expected.
        Returns:
            list[Tensor]:
                A list of Tensors of predicted class-specific or class-agnostic boxes
                for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
                the number of proposals for image i and B is the box dimension (4 or 5)
        """
        if not len(proposals):
            return []
        _, proposal_deltas = predictions
        num_prop_per_image = [len(p) for p in proposals]
        proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
        predict_boxes = self.box2box_transform.apply_deltas(
            proposal_deltas,
            proposal_boxes,
        )  # Nx(KxB)
        return predict_boxes.split(num_prop_per_image)

    def predict_probs(
        self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
    ):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were
                used to compute predictions.
        Returns:
            list[Tensor]:
                A list of Tensors of predicted class probabilities for each image.
                Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.
        """
        scores, _ = predictions
        num_inst_per_image = [len(p) for p in proposals]
        probs = F.softmax(scores, dim=-1)
        return probs.split(num_inst_per_image, dim=0)
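# --- Illustrative sketch (not part of the commit above) ---
# predict_probs softmaxes the (R, K+1) class logits over all proposals and then splits the
# result back into per-image chunks. A pure-PyTorch sketch with made-up sizes:
import torch
import torch.nn.functional as F

scores = torch.randn(7, 81)                         # R=7 proposals in total, K+1=81 classes
num_inst_per_image = [3, 4]                         # proposals per image, summing to R
probs = F.softmax(scores, dim=-1)
per_image = probs.split(num_inst_per_image, dim=0)  # tensors of shape (3, 81) and (4, 81)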
detectron2/modeling/roi_heads/keypoint_head.py
0 → 100644
View file @
b634945d
# Copyright (c) Facebook, Inc. and its affiliates.
from
typing
import
List
import
torch
from
torch
import
nn
from
torch.nn
import
functional
as
F
from
detectron2.config
import
configurable
from
detectron2.layers
import
Conv2d
,
ConvTranspose2d
,
cat
,
interpolate
from
detectron2.structures
import
Instances
,
heatmaps_to_keypoints
from
detectron2.utils.events
import
get_event_storage
from
detectron2.utils.registry
import
Registry
_TOTAL_SKIPPED
=
0
__all__
=
[
"ROI_KEYPOINT_HEAD_REGISTRY"
,
"build_keypoint_head"
,
"BaseKeypointRCNNHead"
,
"KRCNNConvDeconvUpsampleHead"
,
]
ROI_KEYPOINT_HEAD_REGISTRY
=
Registry
(
"ROI_KEYPOINT_HEAD"
)
ROI_KEYPOINT_HEAD_REGISTRY
.
__doc__
=
"""
Registry for keypoint heads, which make keypoint predictions from per-region features.
The registered object will be called with `obj(cfg, input_shape)`.
"""
def
build_keypoint_head
(
cfg
,
input_shape
):
"""
Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`.
"""
name
=
cfg
.
MODEL
.
ROI_KEYPOINT_HEAD
.
NAME
return
ROI_KEYPOINT_HEAD_REGISTRY
.
get
(
name
)(
cfg
,
input_shape
)
def
keypoint_rcnn_loss
(
pred_keypoint_logits
,
instances
,
normalizer
):
"""
Arguments:
pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number
of instances in the batch, K is the number of keypoints, and S is the side length
of the keypoint heatmap. The values are spatial logits.
instances (list[Instances]): A list of M Instances, where M is the batch size.
These instances are predictions from the model
that are in 1:1 correspondence with pred_keypoint_logits.
Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint`
instance.
normalizer (float): Normalize the loss by this amount.
If not specified, we normalize by the number of visible keypoints in the minibatch.
Returns a scalar tensor containing the loss.
"""
heatmaps
=
[]
valid
=
[]
keypoint_side_len
=
pred_keypoint_logits
.
shape
[
2
]
for
instances_per_image
in
instances
:
if
len
(
instances_per_image
)
==
0
:
continue
keypoints
=
instances_per_image
.
gt_keypoints
heatmaps_per_image
,
valid_per_image
=
keypoints
.
to_heatmap
(
instances_per_image
.
proposal_boxes
.
tensor
,
keypoint_side_len
)
heatmaps
.
append
(
heatmaps_per_image
.
view
(
-
1
))
valid
.
append
(
valid_per_image
.
view
(
-
1
))
if
len
(
heatmaps
):
keypoint_targets
=
cat
(
heatmaps
,
dim
=
0
)
valid
=
cat
(
valid
,
dim
=
0
).
to
(
dtype
=
torch
.
uint8
)
valid
=
torch
.
nonzero
(
valid
).
squeeze
(
1
)
# torch.mean (in binary_cross_entropy_with_logits) doesn't
# accept empty tensors, so handle it separately
if
len
(
heatmaps
)
==
0
or
valid
.
numel
()
==
0
:
global
_TOTAL_SKIPPED
_TOTAL_SKIPPED
+=
1
storage
=
get_event_storage
()
storage
.
put_scalar
(
"kpts_num_skipped_batches"
,
_TOTAL_SKIPPED
,
smoothing_hint
=
False
)
return
pred_keypoint_logits
.
sum
()
*
0
N
,
K
,
H
,
W
=
pred_keypoint_logits
.
shape
pred_keypoint_logits
=
pred_keypoint_logits
.
view
(
N
*
K
,
H
*
W
)
keypoint_loss
=
F
.
cross_entropy
(
pred_keypoint_logits
[
valid
],
keypoint_targets
[
valid
],
reduction
=
"sum"
)
# If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch
if
normalizer
is
None
:
normalizer
=
valid
.
numel
()
keypoint_loss
/=
normalizer
return
keypoint_loss
def
keypoint_rcnn_inference
(
pred_keypoint_logits
:
torch
.
Tensor
,
pred_instances
:
List
[
Instances
]):
"""
Post process each predicted keypoint heatmap in `pred_keypoint_logits` into (x, y, score)
and add it to the `pred_instances` as a `pred_keypoints` field.
Args:
pred_keypoint_logits (Tensor): A tensor of shape (R, K, S, S) where R is the total number
of instances in the batch, K is the number of keypoints, and S is the side length of
the keypoint heatmap. The values are spatial logits.
pred_instances (list[Instances]): A list of N Instances, where N is the number of images.
Returns:
None. Each element in pred_instances will contain extra "pred_keypoints" and
"pred_keypoint_heatmaps" fields. "pred_keypoints" is a tensor of shape
(#instance, K, 3) where the last dimension corresponds to (x, y, score).
The scores are larger than 0. "pred_keypoint_heatmaps" contains the raw
keypoint logits as passed to this function.
"""
# flatten all bboxes from all images together (list[Boxes] -> Rx4 tensor)
bboxes_flat
=
cat
([
b
.
pred_boxes
.
tensor
for
b
in
pred_instances
],
dim
=
0
)
pred_keypoint_logits
=
pred_keypoint_logits
.
detach
()
keypoint_results
=
heatmaps_to_keypoints
(
pred_keypoint_logits
,
bboxes_flat
.
detach
())
num_instances_per_image
=
[
len
(
i
)
for
i
in
pred_instances
]
keypoint_results
=
keypoint_results
[:,
:,
[
0
,
1
,
3
]].
split
(
num_instances_per_image
,
dim
=
0
)
heatmap_results
=
pred_keypoint_logits
.
split
(
num_instances_per_image
,
dim
=
0
)
for
keypoint_results_per_image
,
heatmap_results_per_image
,
instances_per_image
in
zip
(
keypoint_results
,
heatmap_results
,
pred_instances
):
# keypoint_results_per_image is (num instances)x(num keypoints)x(x, y, score)
# heatmap_results_per_image is (num instances)x(num keypoints)x(side)x(side)
instances_per_image
.
pred_keypoints
=
keypoint_results_per_image
instances_per_image
.
pred_keypoint_heatmaps
=
heatmap_results_per_image
class
BaseKeypointRCNNHead
(
nn
.
Module
):
"""
Implement the basic Keypoint R-CNN losses and inference logic described in
Sec. 5 of :paper:`Mask R-CNN`.
"""
@
configurable
def
__init__
(
self
,
*
,
num_keypoints
,
loss_weight
=
1.0
,
loss_normalizer
=
1.0
):
"""
        NOTE: this interface is experimental.

        Args:
            num_keypoints (int): number of keypoints to predict
            loss_weight (float): weight to multiply on the keypoint loss
            loss_normalizer (float or str):
                If float, divide the loss by `loss_normalizer * #images`.
                If 'visible', the loss is normalized by the total number of
                visible keypoints across images.
        """
        super().__init__()
        self.num_keypoints = num_keypoints
        self.loss_weight = loss_weight
        assert loss_normalizer == "visible" or isinstance(loss_normalizer, float), loss_normalizer
        self.loss_normalizer = loss_normalizer

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = {
            "loss_weight": cfg.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT,
            "num_keypoints": cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
        }
        normalize_by_visible = (
            cfg.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS
        )  # noqa
        if not normalize_by_visible:
            batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
            positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
            ret["loss_normalizer"] = (
                ret["num_keypoints"] * batch_size_per_image * positive_sample_fraction
            )
        else:
            ret["loss_normalizer"] = "visible"
        return ret

    def forward(self, x, instances: List[Instances]):
        """
        Args:
            x: input 4D region feature(s) provided by :class:`ROIHeads`.
            instances (list[Instances]): contains the boxes & labels corresponding
                to the input features.
                Exact format is up to its caller to decide.
                Typically, this is the foreground instances in training, with
                "proposal_boxes" field and other gt annotations.
                In inference, it contains boxes that are already predicted.

        Returns:
            A dict of losses if in training. The predicted "instances" if in inference.
        """
        x = self.layers(x)
        if self.training:
            num_images = len(instances)
            normalizer = (
                None if self.loss_normalizer == "visible" else num_images * self.loss_normalizer
            )
            return {
                "loss_keypoint": keypoint_rcnn_loss(x, instances, normalizer=normalizer)
                * self.loss_weight
            }
        else:
            keypoint_rcnn_inference(x, instances)
            return instances

    def layers(self, x):
        """
        Neural network layers that make predictions from regional input features.
        """
        raise NotImplementedError


# To get torchscript support, we make the head a subclass of `nn.Sequential`.
# Therefore, to add new layers in this head class, please make sure they are
# added in the order they will be used in forward().
@ROI_KEYPOINT_HEAD_REGISTRY.register()
class KRCNNConvDeconvUpsampleHead(BaseKeypointRCNNHead, nn.Sequential):
    """
    A standard keypoint head containing a series of 3x3 convs, followed by
    a transpose convolution and bilinear interpolation for upsampling.
    It is described in Sec. 5 of :paper:`Mask R-CNN`.
    """

    @configurable
    def __init__(self, input_shape, *, num_keypoints, conv_dims, **kwargs):
        """
        NOTE: this interface is experimental.

        Args:
            input_shape (ShapeSpec): shape of the input feature
            conv_dims: an iterable of output channel counts for each conv in the head
                e.g. (512, 512, 512) for three convs outputting 512 channels.
        """
        super().__init__(num_keypoints=num_keypoints, **kwargs)

        # default up_scale to 2.0 (this can be made an option)
        up_scale = 2.0
        in_channels = input_shape.channels

        for idx, layer_channels in enumerate(conv_dims, 1):
            module = Conv2d(in_channels, layer_channels, 3, stride=1, padding=1)
            self.add_module("conv_fcn{}".format(idx), module)
            self.add_module("conv_fcn_relu{}".format(idx), nn.ReLU())
            in_channels = layer_channels

        deconv_kernel = 4
        self.score_lowres = ConvTranspose2d(
            in_channels, num_keypoints, deconv_kernel, stride=2, padding=deconv_kernel // 2 - 1
        )
        self.up_scale = up_scale

        for name, param in self.named_parameters():
            if "bias" in name:
                nn.init.constant_(param, 0)
            elif "weight" in name:
                # Caffe2 implementation uses MSRAFill, which in fact
                # corresponds to kaiming_normal_ in PyTorch
                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        ret["input_shape"] = input_shape
        ret["conv_dims"] = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS
        return ret

    def layers(self, x):
        for layer in self:
            x = layer(x)
        x = interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False)
        return x
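Editor's note: a minimal usage sketch for the head above (illustrative only, not part of this file). It assumes detectron2's default config keys referenced in from_config and a 256-channel 14x14 region feature; with the default 17 COCO keypoints, the heatmaps come out at 4x the input resolution (one 2x deconv plus one 2x bilinear upsample).

# Illustrative sketch only; assumes default detectron2 config values.
import torch
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads.keypoint_head import build_keypoint_head

cfg = get_cfg()  # default ROI_KEYPOINT_HEAD.NAME is "KRCNNConvDeconvUpsampleHead"
head = build_keypoint_head(cfg, ShapeSpec(channels=256, height=14, width=14))
heatmaps = head.layers(torch.randn(8, 256, 14, 14))  # 8 sampled proposals
print(heatmaps.shape)  # torch.Size([8, 17, 56, 56]) with the default 17 keypoints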
detectron2/modeling/roi_heads/mask_head.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm
from detectron2.structures import Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

__all__ = [
    "BaseMaskRCNNHead",
    "MaskRCNNConvUpsampleHead",
    "build_mask_head",
    "ROI_MASK_HEAD_REGISTRY",
]


ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD")
ROI_MASK_HEAD_REGISTRY.__doc__ = """
Registry for mask heads, which predicts instance masks given
per-region features.

The registered object will be called with `obj(cfg, input_shape)`.
"""


@torch.jit.unused
def mask_rcnn_loss(pred_mask_logits: torch.Tensor, instances: List[Instances], vis_period: int = 0):
    """
    Compute the mask prediction loss defined in the Mask R-CNN paper.

    Args:
        pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask)
            for class-specific or class-agnostic, where B is the total number of predicted masks
            in all images, C is the number of foreground classes, and Hmask, Wmask are the height
            and width of the mask predictions. The values are logits.
        instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. These instances are in 1:1
            correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask,
            ...) associated with each instance are stored in fields.
        vis_period (int): the period (in steps) to dump visualization.

    Returns:
        mask_loss (Tensor): A scalar tensor containing the loss.
    """
    cls_agnostic_mask = pred_mask_logits.size(1) == 1
    total_num_masks = pred_mask_logits.size(0)
    mask_side_len = pred_mask_logits.size(2)
    assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!"

    gt_classes = []
    gt_masks = []
    for instances_per_image in instances:
        if len(instances_per_image) == 0:
            continue
        if not cls_agnostic_mask:
            gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64)
            gt_classes.append(gt_classes_per_image)

        gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize(
            instances_per_image.proposal_boxes.tensor, mask_side_len
        ).to(device=pred_mask_logits.device)
        # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len
        gt_masks.append(gt_masks_per_image)

    if len(gt_masks) == 0:
        return pred_mask_logits.sum() * 0

    gt_masks = cat(gt_masks, dim=0)

    if cls_agnostic_mask:
        pred_mask_logits = pred_mask_logits[:, 0]
    else:
        indices = torch.arange(total_num_masks)
        gt_classes = cat(gt_classes, dim=0)
        pred_mask_logits = pred_mask_logits[indices, gt_classes]

    if gt_masks.dtype == torch.bool:
        gt_masks_bool = gt_masks
    else:
        # Here we allow gt_masks to be float as well (depend on the implementation of rasterize())
        gt_masks_bool = gt_masks > 0.5
    gt_masks = gt_masks.to(dtype=torch.float32)

    # Log the training accuracy (using gt classes and 0.5 threshold)
    mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool
    mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0))
    num_positive = gt_masks_bool.sum().item()
    false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max(
        gt_masks_bool.numel() - num_positive, 1.0
    )
    false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0)

    storage = get_event_storage()
    storage.put_scalar("mask_rcnn/accuracy", mask_accuracy)
    storage.put_scalar("mask_rcnn/false_positive", false_positive)
    storage.put_scalar("mask_rcnn/false_negative", false_negative)
    if vis_period > 0 and storage.iter % vis_period == 0:
        pred_masks = pred_mask_logits.sigmoid()
        vis_masks = torch.cat([pred_masks, gt_masks], axis=2)
        name = "Left: mask prediction; Right: mask GT"
        for idx, vis_mask in enumerate(vis_masks):
            vis_mask = torch.stack([vis_mask] * 3, axis=0)
            storage.put_image(name + f" ({idx})", vis_mask)

    mask_loss = F.binary_cross_entropy_with_logits(pred_mask_logits, gt_masks, reduction="mean")
    return mask_loss


def mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Instances]):
    """
    Convert pred_mask_logits to estimated foreground probability masks while also
    extracting only the masks for the predicted classes in pred_instances. For each
    predicted box, the mask of the same class is attached to the instance by adding a
    new "pred_masks" field to pred_instances.

    Args:
        pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask)
            for class-specific or class-agnostic, where B is the total number of predicted masks
            in all images, C is the number of foreground classes, and Hmask, Wmask are the height
            and width of the mask predictions. The values are logits.
        pred_instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. Each Instances must have field "pred_classes".

    Returns:
        None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask,
            Wmask) for the predicted class. Note that the masks are returned as soft (non-quantized)
            masks at the resolution predicted by the network; post-processing steps, such as resizing
            the predicted masks to the original image resolution and/or binarizing them, is left
            to the caller.
    """
    cls_agnostic_mask = pred_mask_logits.size(1) == 1

    if cls_agnostic_mask:
        mask_probs_pred = pred_mask_logits.sigmoid()
    else:
        # Select masks corresponding to the predicted classes
        num_masks = pred_mask_logits.shape[0]
        class_pred = cat([i.pred_classes for i in pred_instances])
        indices = torch.arange(num_masks, device=class_pred.device)
        mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid()
    # mask_probs_pred.shape: (B, 1, Hmask, Wmask)

    num_boxes_per_image = [len(i) for i in pred_instances]
    mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0)

    for prob, instances in zip(mask_probs_pred, pred_instances):
        instances.pred_masks = prob  # (1, Hmask, Wmask)


class BaseMaskRCNNHead(nn.Module):
    """
    Implement the basic Mask R-CNN losses and inference logic described in :paper:`Mask R-CNN`
    """

    @configurable
    def __init__(self, *, loss_weight: float = 1.0, vis_period: int = 0):
        """
        NOTE: this interface is experimental.

        Args:
            loss_weight (float): multiplier of the loss
            vis_period (int): visualization period
        """
        super().__init__()
        self.vis_period = vis_period
        self.loss_weight = loss_weight

    @classmethod
    def from_config(cls, cfg, input_shape):
        return {"vis_period": cfg.VIS_PERIOD}

    def forward(self, x, instances: List[Instances]):
        """
        Args:
            x: input region feature(s) provided by :class:`ROIHeads`.
            instances (list[Instances]): contains the boxes & labels corresponding
                to the input features.
                Exact format is up to its caller to decide.
                Typically, this is the foreground instances in training, with
                "proposal_boxes" field and other gt annotations.
                In inference, it contains boxes that are already predicted.

        Returns:
            A dict of losses in training. The predicted "instances" in inference.
        """
        x = self.layers(x)
        if self.training:
            return {"loss_mask": mask_rcnn_loss(x, instances, self.vis_period) * self.loss_weight}
        else:
            mask_rcnn_inference(x, instances)
            return instances

    def layers(self, x):
        """
        Neural network layers that make predictions from input features.
        """
        raise NotImplementedError


# To get torchscript support, we make the head a subclass of `nn.Sequential`.
# Therefore, to add new layers in this head class, please make sure they are
# added in the order they will be used in forward().
@ROI_MASK_HEAD_REGISTRY.register()
class MaskRCNNConvUpsampleHead(BaseMaskRCNNHead, nn.Sequential):
    """
    A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`).
    Predictions are made with a final 1x1 conv layer.
    """

    @configurable
    def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm="", **kwargs):
        """
        NOTE: this interface is experimental.

        Args:
            input_shape (ShapeSpec): shape of the input feature
            num_classes (int): the number of foreground classes (i.e. background is not
                included). 1 if using class agnostic prediction.
            conv_dims (list[int]): a list of N>0 integers representing the output dimensions
                of N-1 conv layers and the last upsample layer.
            conv_norm (str or callable): normalization for the conv layers.
                See :func:`detectron2.layers.get_norm` for supported types.
        """
        super().__init__(**kwargs)
        assert len(conv_dims) >= 1, "conv_dims have to be non-empty!"

        self.conv_norm_relus = []

        cur_channels = input_shape.channels
        for k, conv_dim in enumerate(conv_dims[:-1]):
            conv = Conv2d(
                cur_channels,
                conv_dim,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=not conv_norm,
                norm=get_norm(conv_norm, conv_dim),
                activation=nn.ReLU(),
            )
            self.add_module("mask_fcn{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            cur_channels = conv_dim

        self.deconv = ConvTranspose2d(
            cur_channels, conv_dims[-1], kernel_size=2, stride=2, padding=0
        )
        self.add_module("deconv_relu", nn.ReLU())
        cur_channels = conv_dims[-1]

        self.predictor = Conv2d(cur_channels, num_classes, kernel_size=1, stride=1, padding=0)

        for layer in self.conv_norm_relus + [self.deconv]:
            weight_init.c2_msra_fill(layer)
        # use normal distribution initialization for mask prediction layer
        nn.init.normal_(self.predictor.weight, std=0.001)
        if self.predictor.bias is not None:
            nn.init.constant_(self.predictor.bias, 0)

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM
        num_conv = cfg.MODEL.ROI_MASK_HEAD.NUM_CONV
        ret.update(
            conv_dims=[conv_dim] * (num_conv + 1),  # +1 for ConvTranspose
            conv_norm=cfg.MODEL.ROI_MASK_HEAD.NORM,
            input_shape=input_shape,
        )
        if cfg.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK:
            ret["num_classes"] = 1
        else:
            ret["num_classes"] = cfg.MODEL.ROI_HEADS.NUM_CLASSES
        return ret

    def layers(self, x):
        for layer in self:
            x = layer(x)
        return x


def build_mask_head(cfg, input_shape):
    """
    Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`.
    """
    name = cfg.MODEL.ROI_MASK_HEAD.NAME
    return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape)
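Editor's note: a minimal sketch of build_mask_head in use (illustrative only, not part of this file), assuming the default config (MaskRCNNConvUpsampleHead with four 256-channel convs and 80 classes) and a 14x14, 256-channel region feature.

# Illustrative sketch only; assumes default detectron2 config values.
import torch
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads.mask_head import build_mask_head

cfg = get_cfg()
head = build_mask_head(cfg, ShapeSpec(channels=256, height=14, width=14))
logits = head.layers(torch.randn(4, 256, 14, 14))  # 4 foreground proposals
print(logits.shape)  # torch.Size([4, 80, 28, 28]): the deconv doubles the 14x14 input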
detectron2/modeling/roi_heads/roi_heads.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates.
import inspect
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn

from detectron2.config import configurable
from detectron2.layers import ShapeSpec, nonzero_tuple
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

from ..backbone.resnet import BottleneckBlock, ResNet
from ..matcher import Matcher
from ..poolers import ROIPooler
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
from ..sampling import subsample_labels
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers
from .keypoint_head import build_keypoint_head
from .mask_head import build_mask_head

ROI_HEADS_REGISTRY = Registry("ROI_HEADS")
ROI_HEADS_REGISTRY.__doc__ = """
Registry for ROI heads in a generalized R-CNN model.
ROIHeads take feature maps and region proposals, and
perform per-region computation.

The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`ROIHeads`.
"""

logger = logging.getLogger(__name__)


def build_roi_heads(cfg, input_shape):
    """
    Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
    """
    name = cfg.MODEL.ROI_HEADS.NAME
    return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)


def select_foreground_proposals(
    proposals: List[Instances], bg_label: int
) -> Tuple[List[Instances], List[torch.Tensor]]:
    """
    Given a list of N Instances (for N images), each containing a `gt_classes` field,
    return a list of Instances that contain only instances with `gt_classes != -1 &&
    gt_classes != bg_label`.

    Args:
        proposals (list[Instances]): A list of N Instances, where N is the number of
            images in the batch.
        bg_label: label index of background class.

    Returns:
        list[Instances]: N Instances, each contains only the selected foreground instances.
        list[Tensor]: N boolean vector, correspond to the selection mask of
            each Instances object. True for selected instances.
    """
    assert isinstance(proposals, (list, tuple))
    assert isinstance(proposals[0], Instances)
    assert proposals[0].has("gt_classes")
    fg_proposals = []
    fg_selection_masks = []
    for proposals_per_image in proposals:
        gt_classes = proposals_per_image.gt_classes
        fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label)
        fg_idxs = fg_selection_mask.nonzero().squeeze(1)
        fg_proposals.append(proposals_per_image[fg_idxs])
        fg_selection_masks.append(fg_selection_mask)
    return fg_proposals, fg_selection_masks


def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]:
    """
    Args:
        proposals (list[Instances]): a list of N Instances, where N is the
            number of images.

    Returns:
        proposals: only contains proposals with at least one visible keypoint.

    Note that this is still slightly different from Detectron.
    In Detectron, proposals for training keypoint head are re-sampled from
    all the proposals with IOU>threshold & >=1 visible keypoint.

    Here, the proposals are first sampled from all proposals with
    IOU>threshold, then proposals with no visible keypoint are filtered out.
    This strategy seems to make no difference on Detectron and is easier to implement.
    """
    ret = []
    all_num_fg = []
    for proposals_per_image in proposals:
        # If empty/unannotated image (hard negatives), skip filtering for train
        if len(proposals_per_image) == 0:
            ret.append(proposals_per_image)
            continue
        gt_keypoints = proposals_per_image.gt_keypoints.tensor
        # #fg x K x 3
        vis_mask = gt_keypoints[:, :, 2] >= 1
        xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1]
        proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1)  # #fg x 1 x 4
        kp_in_box = (
            (xs >= proposal_boxes[:, :, 0])
            & (xs <= proposal_boxes[:, :, 2])
            & (ys >= proposal_boxes[:, :, 1])
            & (ys <= proposal_boxes[:, :, 3])
        )
        selection = (kp_in_box & vis_mask).any(dim=1)
        selection_idxs = nonzero_tuple(selection)[0]
        all_num_fg.append(selection_idxs.numel())
        ret.append(proposals_per_image[selection_idxs])

    storage = get_event_storage()
    storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg))
    return ret


class ROIHeads(torch.nn.Module):
    """
    ROIHeads perform all per-region computation in an R-CNN.

    It typically contains logic to

    1. (in training only) match proposals with ground truth and sample them
    2. crop the regions and extract per-region features using proposals
    3. make per-region predictions with different heads

    It can have many variants, implemented as subclasses of this class.
    This base class contains the logic to match/sample proposals.
    But it is not necessary to inherit this class if the sampling logic is not needed.
    """

    @configurable
    def __init__(
        self,
        *,
        num_classes,
        batch_size_per_image,
        positive_fraction,
        proposal_matcher,
        proposal_append_gt=True,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            num_classes (int): number of foreground classes (i.e. background is not included)
            batch_size_per_image (int): number of proposals to sample for training
            positive_fraction (float): fraction of positive (foreground) proposals
                to sample for training.
            proposal_matcher (Matcher): matcher that matches proposals and ground truth
            proposal_append_gt (bool): whether to include ground truth as proposals as well
        """
        super().__init__()
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction
        self.num_classes = num_classes
        self.proposal_matcher = proposal_matcher
        self.proposal_append_gt = proposal_append_gt

    @classmethod
    def from_config(cls, cfg):
        return {
            "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE,
            "positive_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION,
            "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES,
            "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT,
            # Matcher to assign box proposals to gt boxes
            "proposal_matcher": Matcher(
                cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
                cfg.MODEL.ROI_HEADS.IOU_LABELS,
                allow_low_quality_matches=False,
            ),
        }

    def _sample_proposals(
        self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Based on the matching between N proposals and M groundtruth,
        sample the proposals and set their classification labels.

        Args:
            matched_idxs (Tensor): a vector of length N, each is the best-matched
                gt index in [0, M) for each proposal.
            matched_labels (Tensor): a vector of length N, the matcher's label
                (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
            gt_classes (Tensor): a vector of length M.

        Returns:
            Tensor: a vector of indices of sampled proposals. Each is in [0, N).
            Tensor: a vector of the same length, the classification label for
                each sampled proposal. Each sample is labeled as either a category in
                [0, num_classes) or the background (num_classes).
        """
        has_gt = gt_classes.numel() > 0
        # Get the corresponding GT for each proposal
        if has_gt:
            gt_classes = gt_classes[matched_idxs]
            # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
            gt_classes[matched_labels == 0] = self.num_classes
            # Label ignore proposals (-1 label)
            gt_classes[matched_labels == -1] = -1
        else:
            gt_classes = torch.zeros_like(matched_idxs) + self.num_classes

        sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
            gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes
        )

        sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
        return sampled_idxs, gt_classes[sampled_idxs]

    @torch.no_grad()
    def label_and_sample_proposals(
        self, proposals: List[Instances], targets: List[Instances]
    ) -> List[Instances]:
        """
        Prepare some proposals to be used to train the ROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
        boxes, with a fraction of positives that is no larger than
        ``self.positive_fraction``.

        Args:
            See :meth:`ROIHeads.forward`

        Returns:
            list[Instances]:
                length `N` list of `Instances`s containing the proposals
                sampled for training. Each `Instances` has the following fields:

                - proposal_boxes: the proposal boxes
                - gt_boxes: the ground-truth box that the proposal is assigned to
                  (this is only meaningful if the proposal has a label > 0; if label = 0
                  then the ground-truth box is random)

                Other fields such as "gt_classes", "gt_masks", that are included in `targets`.
        """
        # Augment proposals with ground-truth boxes.
        # In the case of learned proposals (e.g., RPN), when training starts
        # the proposals will be low quality due to random initialization.
        # It's possible that none of these initial
        # proposals have high enough overlap with the gt objects to be used
        # as positive examples for the second stage components (box head,
        # cls head, mask head). Adding the gt boxes to the set of proposals
        # ensures that the second stage components will have some positive
        # examples from the start of training. For RPN, this augmentation improves
        # convergence and empirically improves box AP on COCO by about 0.5
        # points (under one tested configuration).
        if self.proposal_append_gt:
            proposals = add_ground_truth_to_proposals(targets, proposals)

        proposals_with_gt = []

        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            match_quality_matrix = pairwise_iou(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            sampled_idxs, gt_classes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes
            )

            # Set target attributes of the sampled proposals:
            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes

            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                # We index all the attributes of targets that start with "gt_"
                # and have not been added to proposals yet (="gt_classes").
                # NOTE: here the indexing wastes some compute, because heads
                # like masks, keypoints, etc, will filter the proposals again,
                # (by foreground/background, or number of keypoints in the image, etc)
                # so we essentially index the data twice.
                for (trg_name, trg_value) in targets_per_image.get_fields().items():
                    if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
                        proposals_per_image.set(trg_name, trg_value[sampled_targets])
            # If no GT is given in the image, we don't know what a dummy gt value can be.
            # Therefore the returned proposals won't have any gt_* fields, except for a
            # gt_classes full of background label.

            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)

        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))

        return proposals_with_gt

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
    ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
        """
        Args:
            images (ImageList):
            features (dict[str,Tensor]): input data as a mapping from feature
                map name to tensor. Axis 0 represents the number of images `N` in
                the input data; axes 1-3 are channels, height, and width, which may
                vary between feature maps (e.g., if a feature pyramid is used).
            proposals (list[Instances]): length `N` list of `Instances`. The i-th
                `Instances` contains object proposals for the i-th input image,
                with fields "proposal_boxes" and "objectness_logits".
            targets (list[Instances], optional): length `N` list of `Instances`. The i-th
                `Instances` contains the ground-truth per-instance annotations
                for the i-th input image. Specify `targets` during training only.
                It may have the following fields:

                - gt_boxes: the bounding box of each instance.
                - gt_classes: the label for each instance with a category ranging in [0, #class].
                - gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance.
                - gt_keypoints: NxKx3, the ground-truth keypoints for each instance.

        Returns:
            list[Instances]: length `N` list of `Instances` containing the
            detected instances. Returned during inference only; may be [] during training.

            dict[str->Tensor]:
            mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        raise NotImplementedError()


@ROI_HEADS_REGISTRY.register()
class Res5ROIHeads(ROIHeads):
    """
    The ROIHeads in a typical "C4" R-CNN model, where
    the box and mask head share the cropping and
    the per-region feature computation by a Res5 block.
    See :paper:`ResNet` Appendix A.
    """

    @configurable
    def __init__(
        self,
        *,
        in_features: List[str],
        pooler: ROIPooler,
        res5: nn.Module,
        box_predictor: nn.Module,
        mask_head: Optional[nn.Module] = None,
        **kwargs,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            in_features (list[str]): list of backbone feature map names to use for
                feature extraction
            pooler (ROIPooler): pooler to extract region features from backbone
            res5 (nn.Sequential): a CNN to compute per-region features, to be used by
                ``box_predictor`` and ``mask_head``. Typically this is a "res5"
                block from a ResNet.
            box_predictor (nn.Module): make box predictions from the feature.
                Should have the same interface as :class:`FastRCNNOutputLayers`.
            mask_head (nn.Module): transform features to make mask predictions
        """
        super().__init__(**kwargs)
        self.in_features = in_features
        self.pooler = pooler
        if isinstance(res5, (list, tuple)):
            res5 = nn.Sequential(*res5)
        self.res5 = res5
        self.box_predictor = box_predictor
        self.mask_on = mask_head is not None
        if self.mask_on:
            self.mask_head = mask_head

    @classmethod
    def from_config(cls, cfg, input_shape):
        # fmt: off
        ret = super().from_config(cfg)
        in_features = ret["in_features"] = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_type       = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        pooler_scales     = (1.0 / input_shape[in_features[0]].stride, )
        sampling_ratio    = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        mask_on           = cfg.MODEL.MASK_ON
        # fmt: on
        assert not cfg.MODEL.KEYPOINT_ON
        assert len(in_features) == 1

        ret["pooler"] = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )

        # Compatibility with old moco code. Might be useful.
        # See notes in StandardROIHeads.from_config
        if not inspect.ismethod(cls._build_res5_block):
            logger.warning(
                "The behavior of _build_res5_block may change. "
                "Please do not depend on private methods."
            )
            cls._build_res5_block = classmethod(cls._build_res5_block)

        ret["res5"], out_channels = cls._build_res5_block(cfg)
        ret["box_predictor"] = FastRCNNOutputLayers(
            cfg, ShapeSpec(channels=out_channels, height=1, width=1)
        )

        if mask_on:
            ret["mask_head"] = build_mask_head(
                cfg,
                ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution),
            )
        return ret

    @classmethod
    def _build_res5_block(cls, cfg):
        # fmt: off
        stage_channel_factor = 2 ** 3  # res5 is 8x res2
        num_groups           = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group      = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        bottleneck_channels  = num_groups * width_per_group * stage_channel_factor
        out_channels         = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
        stride_in_1x1        = cfg.MODEL.RESNETS.STRIDE_IN_1X1
        norm                 = cfg.MODEL.RESNETS.NORM
        assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
            "Deformable conv is not yet supported in res5 head."
        # fmt: on

        blocks = ResNet.make_stage(
            BottleneckBlock,
            3,
            stride_per_block=[2, 1, 1],
            in_channels=out_channels // 2,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            norm=norm,
            stride_in_1x1=stride_in_1x1,
        )
        return nn.Sequential(*blocks), out_channels

    def _shared_roi_transform(self, features: List[torch.Tensor], boxes: List[Boxes]):
        x = self.pooler(features, boxes)
        return self.res5(x)

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
    ):
        """
        See :meth:`ROIHeads.forward`.
        """
        del images

        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets

        proposal_boxes = [x.proposal_boxes for x in proposals]
        box_features = self._shared_roi_transform(
            [features[f] for f in self.in_features], proposal_boxes
        )
        predictions = self.box_predictor(box_features.mean(dim=[2, 3]))

        if self.training:
            del features
            losses = self.box_predictor.losses(predictions, proposals)
            if self.mask_on:
                proposals, fg_selection_masks = select_foreground_proposals(
                    proposals, self.num_classes
                )
                # Since the ROI feature transform is shared between boxes and masks,
                # we don't need to recompute features. The mask loss is only defined
                # on foreground proposals, so we need to select out the foreground
                # features.
                mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]
                del box_features
                losses.update(self.mask_head(mask_features, proposals))
            return [], losses
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}

    def forward_with_given_boxes(
        self, features: Dict[str, torch.Tensor], instances: List[Instances]
    ) -> List[Instances]:
        """
        Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.

        Args:
            features: same as in `forward()`
            instances (list[Instances]): instances to predict other outputs. Expect the keys
                "pred_boxes" and "pred_classes" to exist.

        Returns:
            instances (Instances):
                the same `Instances` object, with extra
                fields such as `pred_masks` or `pred_keypoints`.
        """
        assert not self.training
        assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")

        if self.mask_on:
            feature_list = [features[f] for f in self.in_features]
            x = self._shared_roi_transform(feature_list, [x.pred_boxes for x in instances])
            return self.mask_head(x, instances)
        else:
            return instances


@ROI_HEADS_REGISTRY.register()
class StandardROIHeads(ROIHeads):
    """
    It's "standard" in a sense that there is no ROI transform sharing
    or feature sharing between tasks.
    Each head independently processes the input features by each head's
    own pooler and head.

    This class is used by most models, such as FPN and C5.
    To implement more models, you can subclass it and implement a different
    :meth:`forward()` or a head.
    """

    @configurable
    def __init__(
        self,
        *,
        box_in_features: List[str],
        box_pooler: ROIPooler,
        box_head: nn.Module,
        box_predictor: nn.Module,
        mask_in_features: Optional[List[str]] = None,
        mask_pooler: Optional[ROIPooler] = None,
        mask_head: Optional[nn.Module] = None,
        keypoint_in_features: Optional[List[str]] = None,
        keypoint_pooler: Optional[ROIPooler] = None,
        keypoint_head: Optional[nn.Module] = None,
        train_on_pred_boxes: bool = False,
        **kwargs,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            box_in_features (list[str]): list of feature names to use for the box head.
            box_pooler (ROIPooler): pooler to extract region features for box head
            box_head (nn.Module): transform features to make box predictions
            box_predictor (nn.Module): make box predictions from the feature.
                Should have the same interface as :class:`FastRCNNOutputLayers`.
            mask_in_features (list[str]): list of feature names to use for the mask
                pooler or mask head. None if not using mask head.
            mask_pooler (ROIPooler): pooler to extract region features from image features.
                The mask head will then take region features to make predictions.
                If None, the mask head will directly take the dict of image features
                defined by `mask_in_features`
            mask_head (nn.Module): transform features to make mask predictions
            keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask_*``.
            train_on_pred_boxes (bool): whether to use proposal boxes or
                predicted boxes from the box head to train other heads.
        """
        super().__init__(**kwargs)
        # keep self.in_features for backward compatibility
        self.in_features = self.box_in_features = box_in_features
        self.box_pooler = box_pooler
        self.box_head = box_head
        self.box_predictor = box_predictor

        self.mask_on = mask_in_features is not None
        if self.mask_on:
            self.mask_in_features = mask_in_features
            self.mask_pooler = mask_pooler
            self.mask_head = mask_head

        self.keypoint_on = keypoint_in_features is not None
        if self.keypoint_on:
            self.keypoint_in_features = keypoint_in_features
            self.keypoint_pooler = keypoint_pooler
            self.keypoint_head = keypoint_head

        self.train_on_pred_boxes = train_on_pred_boxes

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg)
        ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
        # Subclasses that have not been updated to use from_config style construction
        # may have overridden _init_*_head methods. In this case, those overridden methods
        # will not be classmethods and we need to avoid trying to call them here.
        # We test for this with ismethod which only returns True for bound methods of cls.
        # Such subclasses will need to handle calling their overridden _init_*_head methods.
        if inspect.ismethod(cls._init_box_head):
            ret.update(cls._init_box_head(cfg, input_shape))
        if inspect.ismethod(cls._init_mask_head):
            ret.update(cls._init_mask_head(cfg, input_shape))
        if inspect.ismethod(cls._init_keypoint_head):
            ret.update(cls._init_keypoint_head(cfg, input_shape))
        return ret

    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        # fmt: off
        in_features       = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio    = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type       = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        # fmt: on

        # If StandardROIHeads is applied on multiple feature maps (as in FPN),
        # then we share the same predictors and therefore the channel counts must be the same
        in_channels = [input_shape[f].channels for f in in_features]
        # Check all channel counts are equal
        assert len(set(in_channels)) == 1, in_channels
        in_channels = in_channels[0]

        box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        # Here we split "box head" and "box predictor", which is mainly due to historical reasons.
        # They are used together so the "box predictor" layers should be part of the "box head".
        # New subclasses of ROIHeads do not need "box predictor"s.
        box_head = build_box_head(
            cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
        )
        box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape)
        return {
            "box_in_features": in_features,
            "box_pooler": box_pooler,
            "box_head": box_head,
            "box_predictor": box_predictor,
        }

    @classmethod
    def _init_mask_head(cls, cfg, input_shape):
        if not cfg.MODEL.MASK_ON:
            return {}
        # fmt: off
        in_features       = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio    = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
        pooler_type       = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE
        # fmt: on

        in_channels = [input_shape[f].channels for f in in_features][0]

        ret = {"mask_in_features": in_features}
        ret["mask_pooler"] = (
            ROIPooler(
                output_size=pooler_resolution,
                scales=pooler_scales,
                sampling_ratio=sampling_ratio,
                pooler_type=pooler_type,
            )
            if pooler_type
            else None
        )
        if pooler_type:
            shape = ShapeSpec(
                channels=in_channels, width=pooler_resolution, height=pooler_resolution
            )
        else:
            shape = {f: input_shape[f] for f in in_features}
        ret["mask_head"] = build_mask_head(cfg, shape)
        return ret

    @classmethod
    def _init_keypoint_head(cls, cfg, input_shape):
        if not cfg.MODEL.KEYPOINT_ON:
            return {}
        # fmt: off
        in_features       = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION
        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in in_features)  # noqa
        sampling_ratio    = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO
        pooler_type       = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE
        # fmt: on

        in_channels = [input_shape[f].channels for f in in_features][0]

        ret = {"keypoint_in_features": in_features}
        ret["keypoint_pooler"] = (
            ROIPooler(
                output_size=pooler_resolution,
                scales=pooler_scales,
                sampling_ratio=sampling_ratio,
                pooler_type=pooler_type,
            )
            if pooler_type
            else None
        )
        if pooler_type:
            shape = ShapeSpec(
                channels=in_channels, width=pooler_resolution, height=pooler_resolution
            )
        else:
            shape = {f: input_shape[f] for f in in_features}
        ret["keypoint_head"] = build_keypoint_head(cfg, shape)
        return ret

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
    ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
        """
        See :class:`ROIHeads.forward`.
        """
        del images
        if self.training:
            assert targets, "'targets' argument is required during training"
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets

        if self.training:
            losses = self._forward_box(features, proposals)
            # Usually the original proposals used by the box head are used by the mask, keypoint
            # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
            # predicted by the box head.
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return proposals, losses
        else:
            pred_instances = self._forward_box(features, proposals)
            # During inference cascaded prediction is used: the mask and keypoints heads are only
            # applied to the top scoring box detections.
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}

    def forward_with_given_boxes(
        self, features: Dict[str, torch.Tensor], instances: List[Instances]
    ) -> List[Instances]:
        """
        Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.

        This is useful for downstream tasks where a box is known, but need to obtain
        other attributes (outputs of other heads).
        Test-time augmentation also uses this.

        Args:
            features: same as in `forward()`
            instances (list[Instances]): instances to predict other outputs. Expect the keys
                "pred_boxes" and "pred_classes" to exist.

        Returns:
            list[Instances]:
                the same `Instances` objects, with extra
                fields such as `pred_masks` or `pred_keypoints`.
        """
        assert not self.training
        assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")

        instances = self._forward_mask(features, instances)
        instances = self._forward_keypoint(features, instances)
        return instances

    def _forward_box(self, features: Dict[str, torch.Tensor], proposals: List[Instances]):
        """
        Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
        the function puts predicted boxes in the `proposal_boxes` field of the `proposals` argument.

        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
                Each has fields "proposal_boxes", and "objectness_logits",
                "gt_classes", "gt_boxes".

        Returns:
            In training, a dict of losses.
            In inference, a list of `Instances`, the predicted instances.
        """
        features = [features[f] for f in self.box_in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        box_features = self.box_head(box_features)
        predictions = self.box_predictor(box_features)
        del box_features

        if self.training:
            losses = self.box_predictor.losses(predictions, proposals)
            # proposals is modified in-place below, so losses must be computed first.
            if self.train_on_pred_boxes:
                with torch.no_grad():
                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
                        predictions, proposals
                    )
                    for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
                        proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
            return losses
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            return pred_instances

    def _forward_mask(self, features: Dict[str, torch.Tensor], instances: List[Instances]):
        """
        Forward logic of the mask prediction branch.

        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            instances (list[Instances]): the per-image instances to train/predict masks.
                In training, they can be the proposals.
                In inference, they can be the boxes predicted by R-CNN box head.

        Returns:
            In training, a dict of losses.
            In inference, update `instances` with new fields "pred_masks" and return it.
        """
        if not self.mask_on:
            return {} if self.training else instances

        if self.training:
            # head is only trained on positive proposals.
            instances, _ = select_foreground_proposals(instances, self.num_classes)

        if self.mask_pooler is not None:
            features = [features[f] for f in self.mask_in_features]
            boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]
            features = self.mask_pooler(features, boxes)
        else:
            features = {f: features[f] for f in self.mask_in_features}
        return self.mask_head(features, instances)

    def _forward_keypoint(self, features: Dict[str, torch.Tensor], instances: List[Instances]):
        """
        Forward logic of the keypoint prediction branch.

        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            instances (list[Instances]): the per-image instances to train/predict keypoints.
                In training, they can be the proposals.
                In inference, they can be the boxes predicted by R-CNN box head.

        Returns:
            In training, a dict of losses.
            In inference, update `instances` with new fields "pred_keypoints" and return it.
        """
        if not self.keypoint_on:
            return {} if self.training else instances

        if self.training:
            # head is only trained on positive proposals with >=1 visible keypoints.
            instances, _ = select_foreground_proposals(instances, self.num_classes)
            instances = select_proposals_with_visible_keypoints(instances)

        if self.keypoint_pooler is not None:
            features = [features[f] for f in self.keypoint_in_features]
            boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]
            features = self.keypoint_pooler(features, boxes)
        else:
            features = {f: features[f] for f in self.keypoint_in_features}
        return self.keypoint_head(features, instances)
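Editor's note: a minimal sketch of the build/forward contract described above (illustrative only, not part of this file), assuming the default config where ROI_HEADS.NAME is "Res5ROIHeads" and the backbone exposes the single "res4" feature map.

# Illustrative sketch only; assumes default detectron2 config values.
from detectron2.config import get_cfg
from detectron2.modeling import build_backbone
from detectron2.modeling.roi_heads import build_roi_heads

cfg = get_cfg()
backbone = build_backbone(cfg)
roi_heads = build_roi_heads(cfg, backbone.output_shape())
# training:   pred_instances, losses = roi_heads(images, features, proposals, targets)
# inference:  pred_instances, _      = roi_heads(images, features, proposals)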
detectron2/modeling/roi_heads/rotated_fast_rcnn.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import torch

from detectron2.config import configurable
from detectron2.layers import ShapeSpec, batched_nms_rotated
from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
from detectron2.utils.events import get_event_storage

from ..box_regression import Box2BoxTransformRotated
from ..poolers import ROIPooler
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads

logger = logging.getLogger(__name__)

"""
Shape shorthand in this module:

    N: number of images in the minibatch
    R: number of ROIs, combined over all images, in the minibatch
    Ri: number of ROIs in image i
    K: number of foreground classes. E.g., there are 80 foreground classes in COCO.

Naming convention:

    deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box
    transform (see :class:`box_regression.Box2BoxTransformRotated`).

    pred_class_logits: predicted class scores in [-inf, +inf]; use
        softmax(pred_class_logits) to estimate P(class).

    gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
        foreground object classes and K represents the background class.

    pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals
        to detection box predictions.

    gt_proposal_deltas: ground-truth rotated box2box transform deltas
"""


def fast_rcnn_inference_rotated(
    boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
):
    """
    Call `fast_rcnn_inference_single_image_rotated` for all images.

    Args:
        boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
            boxes for each image. Element i has shape (Ri, K * 5) if doing
            class-specific regression, or (Ri, 5) if doing class-agnostic
            regression, where Ri is the number of predicted objects for image i.
            This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
        scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
            Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
            for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
        image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
        score_thresh (float): Only return detections with a confidence score exceeding this
            threshold.
        nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
        topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
            all detections.

    Returns:
        instances: (list[Instances]): A list of N instances, one for each image in the batch,
            that stores the topk most confidence detections.
        kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
            the corresponding boxes/scores index in [0, Ri) from the input, for image i.
    """
    result_per_image = [
        fast_rcnn_inference_single_image_rotated(
            boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
        )
        for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
    ]
    return [x[0] for x in result_per_image], [x[1] for x in result_per_image]


def fast_rcnn_inference_single_image_rotated(
    boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
):
    """
    Single-image inference. Return rotated bounding-box detection results by thresholding
    on scores and applying rotated non-maximum suppression (Rotated NMS).

    Args:
        Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes
        per image.

    Returns:
        Same as `fast_rcnn_inference_rotated`, but for only one image.
    """
    valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
    if not valid_mask.all():
        boxes = boxes[valid_mask]
        scores = scores[valid_mask]

    B = 5  # box dimension
    scores = scores[:, :-1]
    num_bbox_reg_classes = boxes.shape[1] // B
    # Convert to Boxes to use the `clip` function ...
    boxes = RotatedBoxes(boxes.reshape(-1, B))
    boxes.clip(image_shape)
    boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B)  # R x C x B
    # Filter results based on detection scores
    filter_mask = scores > score_thresh  # R x K
    # R' x 2. First column contains indices of the R predictions;
    # Second column contains indices of classes.
    filter_inds = filter_mask.nonzero()
    if num_bbox_reg_classes == 1:
        boxes = boxes[filter_inds[:, 0], 0]
    else:
        boxes = boxes[filter_mask]
    scores = scores[filter_mask]

    # Apply per-class Rotated NMS
    keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh)
    if topk_per_image >= 0:
        keep = keep[:topk_per_image]
    boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]

    result = Instances(image_shape)
    result.pred_boxes = RotatedBoxes(boxes)
    result.scores = scores
    result.pred_classes = filter_inds[:, 1]
    return result, filter_inds[:, 0]


class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers):
    """
    Two linear layers for predicting Rotated Fast R-CNN outputs.
    """

    @classmethod
    def from_config(cls, cfg, input_shape):
        args = super().from_config(cfg, input_shape)
        args["box2box_transform"] = Box2BoxTransformRotated(
            weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
        )
        return args

    def inference(self, predictions, proposals):
        """
        Returns:
            list[Instances]: same as `fast_rcnn_inference_rotated`.
            list[Tensor]: same as `fast_rcnn_inference_rotated`.
        """
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        image_shapes = [x.image_size for x in proposals]

        return fast_rcnn_inference_rotated(
            boxes,
            scores,
            image_shapes,
            self.test_score_thresh,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )


@ROI_HEADS_REGISTRY.register()
class RROIHeads(StandardROIHeads):
    """
    This class is used by Rotated Fast R-CNN to detect rotated boxes.
    For now, it only supports box predictions but not mask or keypoints.
    """

    @configurable
    def __init__(self, **kwargs):
        """
        NOTE: this interface is experimental.
        """
        super().__init__(**kwargs)
        assert (
            not self.mask_on and not self.keypoint_on
        ), "Mask/Keypoints not supported in Rotated ROIHeads."
        assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!"

    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        # fmt: off
        in_features       = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio    = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type       = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        # fmt: on
        assert pooler_type in ["ROIAlignRotated"], pooler_type
        # assume all channel counts are equal
        in_channels = [input_shape[f].channels for f in in_features][0]

        box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        box_head = build_box_head(
            cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
        )
        # This line is the only difference v.s. StandardROIHeads
        box_predictor = RotatedFastRCNNOutputLayers(cfg, box_head.output_shape)
        return {
            "box_in_features": in_features,
            "box_pooler": box_pooler,
            "box_head": box_head,
            "box_predictor": box_predictor,
        }

    @torch.no_grad()
    def label_and_sample_proposals(self, proposals, targets):
        """
        Prepare some proposals to be used to train the RROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
        with a fraction of positives that is no larger than `self.positive_sample_fraction`.

        Args:
            See :meth:`StandardROIHeads.forward`

        Returns:
            list[Instances]: length `N` list of `Instances`s containing the proposals
                sampled for training. Each `Instances` has the following fields:

                - proposal_boxes: the rotated proposal boxes
                - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to
                  (this is only meaningful if the proposal has a label > 0; if label = 0
                  then the ground-truth box is random)
                - gt_classes: the ground-truth classification label for each proposal
        """
        if self.proposal_append_gt:
            proposals = add_ground_truth_to_proposals(targets, proposals)

        proposals_with_gt = []

        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            match_quality_matrix = pairwise_iou_rotated(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            sampled_idxs, gt_classes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes
            )

            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes

            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets]

            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)

        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))

        return proposals_with_gt
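Editor's note: RROIHeads only works in a rotated-box pipeline, per the asserts above. A config sketch of the keys it expects (illustrative only, not part of this file; the rotated proposal generator and anchor settings are configured elsewhere and not shown here).

# Illustrative sketch only; pairs RROIHeads with the rotated pooler and 5-d box regression.
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.MODEL.ROI_HEADS.NAME = "RROIHeads"
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated"  # required by _init_box_head
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0, 1.0)  # 5-d (dx, dy, dw, dh, da)
cfg.MODEL.MASK_ON = False      # masks and keypoints are rejected in __init__
cfg.MODEL.KEYPOINT_ON = False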
detectron2/modeling/sampling.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates.
import torch

from detectron2.layers import nonzero_tuple

__all__ = ["subsample_labels"]


def subsample_labels(
    labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int
):
    """
    Return `num_samples` (or fewer, if not enough found)
    random samples from `labels` which is a mixture of positives & negatives.
    It will try to return as many positives as possible without
    exceeding `positive_fraction * num_samples`, and then try to
    fill the remaining slots with negatives.

    Args:
        labels (Tensor): (N, ) label vector with values:
            * -1: ignore
            * bg_label: background ("negative") class
            * otherwise: one or more foreground ("positive") classes
        num_samples (int): The total number of labels with value >= 0 to return.
            Values that are not sampled will be filled with -1 (ignore).
        positive_fraction (float): The number of subsampled labels with values > 0
            is `min(num_positives, int(positive_fraction * num_samples))`. The number
            of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.
            In other words, if there are not enough positives, the sample is filled with
            negatives. If there are also not enough negatives, then as many elements are
            sampled as is possible.
        bg_label (int): label index of background ("negative") class.

    Returns:
        pos_idx, neg_idx (Tensor):
            1D vector of indices. The total length of both is `num_samples` or fewer.
    """
    positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0]
    negative = nonzero_tuple(labels == bg_label)[0]

    num_pos = int(num_samples * positive_fraction)
    # protect against not enough positive examples
    num_pos = min(positive.numel(), num_pos)
    num_neg = num_samples - num_pos
    # protect against not enough negative examples
    num_neg = min(negative.numel(), num_neg)

    # randomly select positive and negative examples
    perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
    perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]

    pos_idx = positive[perm1]
    neg_idx = negative[perm2]
    return pos_idx, neg_idx
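Editor's note: a small worked example of subsample_labels (illustrative only, not part of this file), using label 0 as the background class.

# Illustrative sketch only.
import torch
from detectron2.modeling.sampling import subsample_labels

labels = torch.tensor([-1, 0, 3, 0, 7, 0, 0, 5])  # -1 ignore, 0 background, >0 foreground
pos_idx, neg_idx = subsample_labels(labels, num_samples=4, positive_fraction=0.5, bg_label=0)
# pos_idx holds at most 2 of the foreground indices {2, 4, 7};
# neg_idx fills the remaining slots from the background indices {1, 3, 5, 6}.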
detectron2/modeling/test_time_augmentation.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import numpy as np
from contextlib import contextmanager
from itertools import count
from typing import List
import torch
from fvcore.transforms import HFlipTransform, NoOpTransform
from torch import nn
from torch.nn.parallel import DistributedDataParallel

from detectron2.config import configurable
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import (
    RandomFlip,
    ResizeShortestEdge,
    ResizeTransform,
    apply_augmentations,
)
from detectron2.structures import Boxes, Instances

from .meta_arch import GeneralizedRCNN
from .postprocessing import detector_postprocess
from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image

__all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"]
class DatasetMapperTTA:
    """
    Implement test-time augmentation for detection data.
    It is a callable which takes a dataset dict from a detection dataset,
    and returns a list of dataset dicts where the images
    are augmented from the input image by the transformations defined in the config.
    This is used for test-time augmentation.
    """

    @configurable
    def __init__(self, min_sizes: List[int], max_size: int, flip: bool):
        """
        Args:
            min_sizes: list of short-edge sizes to resize the image to
            max_size: maximum height or width of resized images
            flip: whether to apply flipping augmentation
        """
        self.min_sizes = min_sizes
        self.max_size = max_size
        self.flip = flip

    @classmethod
    def from_config(cls, cfg):
        return {
            "min_sizes": cfg.TEST.AUG.MIN_SIZES,
            "max_size": cfg.TEST.AUG.MAX_SIZE,
            "flip": cfg.TEST.AUG.FLIP,
        }

    def __call__(self, dataset_dict):
        """
        Args:
            dict: a dict in standard model input format. See tutorials for details.

        Returns:
            list[dict]:
                a list of dicts, which contain augmented versions of the input image.
                The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``.
                Each dict has field "transforms" which is a TransformList,
                containing the transforms that are used to generate this image.
        """
        numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy()
        shape = numpy_image.shape
        orig_shape = (dataset_dict["height"], dataset_dict["width"])
        if shape[:2] != orig_shape:
            # It transforms the "original" image in the dataset to the input image
            pre_tfm = ResizeTransform(orig_shape[0], orig_shape[1], shape[0], shape[1])
        else:
            pre_tfm = NoOpTransform()

        # Create all combinations of augmentations to use
        aug_candidates = []  # each element is a list[Augmentation]
        for min_size in self.min_sizes:
            resize = ResizeShortestEdge(min_size, self.max_size)
            aug_candidates.append([resize])  # resize only
            if self.flip:
                flip = RandomFlip(prob=1.0)
                aug_candidates.append([resize, flip])  # resize + flip

        # Apply all the augmentations
        ret = []
        for aug in aug_candidates:
            new_image, tfms = apply_augmentations(aug, np.copy(numpy_image))
            torch_image = torch.from_numpy(np.ascontiguousarray(new_image.transpose(2, 0, 1)))

            dic = copy.deepcopy(dataset_dict)
            dic["transforms"] = pre_tfm + tfms
            dic["image"] = torch_image
            ret.append(dic)
        return ret
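# A minimal usage sketch, not part of the original file; the sizes below are
# illustrative rather than config defaults. The input dict follows the standard
# model-input format with a CHW image tensor.
_example_mapper = DatasetMapperTTA(min_sizes=[400, 600], max_size=1000, flip=True)
_example_dict = {
    "image": torch.zeros(3, 480, 640, dtype=torch.uint8),  # CHW
    "height": 480,
    "width": 640,
}
_example_augmented = _example_mapper(_example_dict)
# len(_example_augmented) == len(min_sizes) * 2 == 4; each dict carries the resized
# (and possibly flipped) image plus the "transforms" used to produce it.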
class GeneralizedRCNNWithTTA(nn.Module):
    """
    A GeneralizedRCNN with test-time augmentation enabled.
    Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`.
    """

    def __init__(self, cfg, model, tta_mapper=None, batch_size=3):
        """
        Args:
            cfg (CfgNode):
            model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
            tta_mapper (callable): takes a dataset dict and returns a list of
                augmented versions of the dataset dict. Defaults to
                `DatasetMapperTTA(cfg)`.
            batch_size (int): batch the augmented images into this batch size for inference.
        """
        super().__init__()
        if isinstance(model, DistributedDataParallel):
            model = model.module
        assert isinstance(
            model, GeneralizedRCNN
        ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model))
        self.cfg = cfg.clone()
        assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet"
        assert (
            not self.cfg.MODEL.LOAD_PROPOSALS
        ), "TTA for pre-computed proposals is not supported yet"

        self.model = model

        if tta_mapper is None:
            tta_mapper = DatasetMapperTTA(cfg)
        self.tta_mapper = tta_mapper
        self.batch_size = batch_size
    @contextmanager
    def _turn_off_roi_heads(self, attrs):
        """
        Open a context where some heads in `model.roi_heads` are temporarily turned off.

        Args:
            attrs (list[str]): the attributes in `model.roi_heads` which can be used
                to turn off a specific head, e.g., "mask_on", "keypoint_on".
        """
        roi_heads = self.model.roi_heads
        old = {}
        for attr in attrs:
            try:
                old[attr] = getattr(roi_heads, attr)
            except AttributeError:
                # The head may not be implemented in certain ROIHeads
                pass

        if len(old.keys()) == 0:
            yield
        else:
            for attr in old.keys():
                setattr(roi_heads, attr, False)
            yield
            for attr in old.keys():
                setattr(roi_heads, attr, old[attr])

    def _batch_inference(self, batched_inputs, detected_instances=None):
        """
        Execute inference on a list of inputs,
        using batch size = self.batch_size, instead of the length of the list.

        Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference`
        """
        if detected_instances is None:
            detected_instances = [None] * len(batched_inputs)

        outputs = []
        inputs, instances = [], []
        for idx, input, instance in zip(count(), batched_inputs, detected_instances):
            inputs.append(input)
            instances.append(instance)
            if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:
                outputs.extend(
                    self.model.inference(
                        inputs,
                        instances if instances[0] is not None else None,
                        do_postprocess=False,
                    )
                )
                inputs, instances = [], []
        return outputs
    def __call__(self, batched_inputs):
        """
        Same input/output format as :meth:`GeneralizedRCNN.forward`
        """

        def _maybe_read_image(dataset_dict):
            ret = copy.copy(dataset_dict)
            if "image" not in ret:
                image = read_image(ret.pop("file_name"), self.model.input_format)
                image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1)))  # CHW
                ret["image"] = image
            if "height" not in ret and "width" not in ret:
                ret["height"] = image.shape[1]
                ret["width"] = image.shape[2]
            return ret

        return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs]

    def _inference_one_image(self, input):
        """
        Args:
            input (dict): one dataset dict with "image" field being a CHW tensor

        Returns:
            dict: one output dict
        """
        orig_shape = (input["height"], input["width"])
        augmented_inputs, tfms = self._get_augmented_inputs(input)
        # Detect boxes from all augmented versions
        with self._turn_off_roi_heads(["mask_on", "keypoint_on"]):
            # temporarily disable roi heads
            all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)
        # merge all detected boxes to obtain final predictions for boxes
        merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)

        if self.cfg.MODEL.MASK_ON:
            # Use the detected boxes to obtain masks
            augmented_instances = self._rescale_detected_boxes(
                augmented_inputs, merged_instances, tfms
            )
            # run forward on the detected boxes
            outputs = self._batch_inference(augmented_inputs, augmented_instances)
            # Delete now useless variables to avoid being out of memory
            del augmented_inputs, augmented_instances
            # average the predictions
            merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms)
            merged_instances = detector_postprocess(merged_instances, *orig_shape)
            return {"instances": merged_instances}
        else:
            return {"instances": merged_instances}
    def _get_augmented_inputs(self, input):
        augmented_inputs = self.tta_mapper(input)
        tfms = [x.pop("transforms") for x in augmented_inputs]
        return augmented_inputs, tfms

    def _get_augmented_boxes(self, augmented_inputs, tfms):
        # 1: forward with all augmented images
        outputs = self._batch_inference(augmented_inputs)
        # 2: union the results
        all_boxes = []
        all_scores = []
        all_classes = []
        for output, tfm in zip(outputs, tfms):
            # Need to inverse the transforms on boxes, to obtain results on original image
            pred_boxes = output.pred_boxes.tensor
            original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy())
            all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device))

            all_scores.extend(output.scores)
            all_classes.extend(output.pred_classes)
        all_boxes = torch.cat(all_boxes, dim=0)
        return all_boxes, all_scores, all_classes
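    # Worked example of the inverse mapping above (not in the original source):
    # for an HFlipTransform over a 200-pixel-wide image, tfm.inverse() is the
    # flip itself, and apply_box maps x0' = width - x1 and x1' = width - x0, so
    # a box [10, 20, 50, 60] in the flipped image becomes [150, 20, 190, 60] in
    # the original image. Resize transforms are inverted the same way, rescaling
    # the coordinates back to the original resolution.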
    def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw):
        # select from the union of all results
        num_boxes = len(all_boxes)
        num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES
        # +1 because fast_rcnn_inference expects background scores as well
        all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device)
        for idx, cls, score in zip(count(), all_classes, all_scores):
            all_scores_2d[idx, cls] = score

        merged_instances, _ = fast_rcnn_inference_single_image(
            all_boxes,
            all_scores_2d,
            shape_hw,
            1e-8,
            self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
            self.cfg.TEST.DETECTIONS_PER_IMAGE,
        )

        return merged_instances

    def _rescale_detected_boxes(self, augmented_inputs, merged_instances, tfms):
        augmented_instances = []
        for input, tfm in zip(augmented_inputs, tfms):
            # Transform the target box to the augmented image's coordinate space
            pred_boxes = merged_instances.pred_boxes.tensor.cpu().numpy()
            pred_boxes = torch.from_numpy(tfm.apply_box(pred_boxes))

            aug_instances = Instances(
                image_size=input["image"].shape[1:3],
                pred_boxes=Boxes(pred_boxes),
                pred_classes=merged_instances.pred_classes,
                scores=merged_instances.scores,
            )
            augmented_instances.append(aug_instances)
        return augmented_instances
    def _reduce_pred_masks(self, outputs, tfms):
        # Should apply inverse transforms on masks.
        # We assume only resize & flip are used. pred_masks is a scale-invariant
        # representation, so we handle flip specially
        for output, tfm in zip(outputs, tfms):
            if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
                output.pred_masks = output.pred_masks.flip(dims=[3])
        all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0)
        avg_pred_masks = torch.mean(all_pred_masks, dim=0)
        return avg_pred_masks
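A minimal end-to-end sketch of wrapping a trained model with TTA (hypothetical; it assumes a detectron2 `cfg` with trained weights and a list `inputs` in the standard model-input format):

from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import GeneralizedRCNNWithTTA, build_model

model = build_model(cfg)                      # cfg: a detectron2 CfgNode
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()

tta_model = GeneralizedRCNNWithTTA(cfg, model)
outputs = tta_model(inputs)                   # one {"instances": Instances} dict per input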
detectron2/projects/README.md 0 → 100644
Projects live in the [`projects` directory](../../projects) under the root of this repository, but not here.