OpenDAS / detectron2 / Commits / c732df65

Commit c732df65, authored Jan 18, 2024 by limm

    push v0.1.3 version commit bd2ea47

Parent: 5b3792fc
Pipeline #706 failed with stages in 0 seconds
Changes: 424 | Pipelines: 1

Showing 20 changed files with 4780 additions and 0 deletions (+4780, -0)
detectron2/modeling/meta_arch/rcnn.py                        +263  -0
detectron2/modeling/meta_arch/retinanet.py                   +489  -0
detectron2/modeling/meta_arch/semantic_seg.py                +186  -0
detectron2/modeling/poolers.py                               +231  -0
detectron2/modeling/postprocessing.py                        +79   -0
detectron2/modeling/proposal_generator/__init__.py           +3    -0
detectron2/modeling/proposal_generator/build.py              +24   -0
detectron2/modeling/proposal_generator/proposal_utils.py     +57   -0
detectron2/modeling/proposal_generator/rpn.py                +285  -0
detectron2/modeling/proposal_generator/rpn_outputs.py        +323  -0
detectron2/modeling/proposal_generator/rrpn.py               +233  -0
detectron2/modeling/roi_heads/__init__.py                    +16   -0
detectron2/modeling/roi_heads/box_head.py                    +115  -0
detectron2/modeling/roi_heads/cascade_rcnn.py                +298  -0
detectron2/modeling/roi_heads/fast_rcnn.py                   +510  -0
detectron2/modeling/roi_heads/keypoint_head.py               +253  -0
detectron2/modeling/roi_heads/mask_head.py                   +277  -0
detectron2/modeling/roi_heads/roi_heads.py                   +812  -0
detectron2/modeling/roi_heads/rotated_fast_rcnn.py           +276  -0
detectron2/modeling/sampling.py                              +50   -0
detectron2/modeling/meta_arch/rcnn.py  (new file mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
import torch
from torch import nn

from detectron2.structures import ImageList
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n

from ..backbone import build_backbone
from ..postprocessing import detector_postprocess
from ..proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from .build import META_ARCH_REGISTRY

__all__ = ["GeneralizedRCNN", "ProposalNetwork"]


@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(nn.Module):
    """
    Generalized R-CNN. Any model that contains the following three components:
    1. Per-image feature extraction (aka backbone)
    2. Region proposal generation
    3. Per-region feature extraction and prediction
    """

    def __init__(self, cfg):
        super().__init__()
        self.backbone = build_backbone(cfg)
        self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
        self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())
        self.vis_period = cfg.VIS_PERIOD
        self.input_format = cfg.INPUT.FORMAT

        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
        self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))

    @property
    def device(self):
        return self.pixel_mean.device

    def visualize_training(self, batched_inputs, proposals):
        """
        A function used to visualize images and proposals. It shows ground truth
        bounding boxes on the original image and up to 20 predicted object
        proposals on the original image. Users can implement different
        visualization functions for different models.

        Args:
            batched_inputs (list): a list that contains input to the model.
            proposals (list): a list that contains predicted proposals. Both
                batched_inputs and proposals should have the same length.
        """
        from detectron2.utils.visualizer import Visualizer

        storage = get_event_storage()
        max_vis_prop = 20

        for input, prop in zip(batched_inputs, proposals):
            img = input["image"].cpu().numpy()
            assert img.shape[0] == 3, "Images should have 3 channels."
            if self.input_format == "BGR":
                img = img[::-1, :, :]
            img = img.transpose(1, 2, 0)
            v_gt = Visualizer(img, None)
            v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
            anno_img = v_gt.get_image()
            box_size = min(len(prop.proposal_boxes), max_vis_prop)
            v_pred = Visualizer(img, None)
            v_pred = v_pred.overlay_instances(
                boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
            )
            prop_img = v_pred.get_image()
            vis_img = np.concatenate((anno_img, prop_img), axis=1)
            vis_img = vis_img.transpose(2, 0, 1)
            vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
            storage.put_image(vis_name, vis_img)
            break  # only visualize one image in a batch

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:

                * image: Tensor, image in (C, H, W) format.
                * instances (optional): groundtruth :class:`Instances`
                * proposals (optional): :class:`Instances`, precomputed proposals.

                Other information that's included in the original dicts, such as:

                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.

        Returns:
            list[dict]:
                Each dict is the output for one input image.
                The dict contains one key "instances" whose value is a :class:`Instances`.
                The :class:`Instances` object has the following keys:
                "pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
        """
        if not self.training:
            return self.inference(batched_inputs)

        images = self.preprocess_image(batched_inputs)
        if "instances" in batched_inputs[0]:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
        elif "targets" in batched_inputs[0]:
            log_first_n(
                logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
            )
            gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
        else:
            gt_instances = None

        features = self.backbone(images.tensor)

        if self.proposal_generator:
            proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
        else:
            assert "proposals" in batched_inputs[0]
            proposals = [x["proposals"].to(self.device) for x in batched_inputs]
            proposal_losses = {}

        _, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
        if self.vis_period > 0:
            storage = get_event_storage()
            if storage.iter % self.vis_period == 0:
                self.visualize_training(batched_inputs, proposals)

        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        return losses

    def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
        """
        Run inference on the given inputs.

        Args:
            batched_inputs (list[dict]): same as in :meth:`forward`
            detected_instances (None or list[Instances]): if not None, it
                contains an `Instances` object per image. The `Instances`
                object contains "pred_boxes" and "pred_classes" which are
                known boxes in the image.
                The inference will then skip the detection of bounding boxes,
                and only predict other per-ROI outputs.
            do_postprocess (bool): whether to apply post-processing on the outputs.

        Returns:
            same as in :meth:`forward`.
        """
        assert not self.training

        images = self.preprocess_image(batched_inputs)
        features = self.backbone(images.tensor)

        if detected_instances is None:
            if self.proposal_generator:
                proposals, _ = self.proposal_generator(images, features, None)
            else:
                assert "proposals" in batched_inputs[0]
                proposals = [x["proposals"].to(self.device) for x in batched_inputs]

            results, _ = self.roi_heads(images, features, proposals, None)
        else:
            detected_instances = [x.to(self.device) for x in detected_instances]
            results = self.roi_heads.forward_with_given_boxes(features, detected_instances)

        if do_postprocess:
            return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
        else:
            return results

    def preprocess_image(self, batched_inputs):
        """
        Normalize, pad and batch the input images.
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        return images

    @staticmethod
    def _postprocess(instances, batched_inputs, image_sizes):
        """
        Rescale the output instances to the target size.
        """
        # note: private function; subject to changes
        processed_results = []
        for results_per_image, input_per_image, image_size in zip(
            instances, batched_inputs, image_sizes
        ):
            height = input_per_image.get("height", image_size[0])
            width = input_per_image.get("width", image_size[1])
            r = detector_postprocess(results_per_image, height, width)
            processed_results.append({"instances": r})
        return processed_results


@META_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
    """
    A meta architecture that only predicts object proposals.
    """

    def __init__(self, cfg):
        super().__init__()
        self.backbone = build_backbone(cfg)
        self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
        self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
        self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))

    @property
    def device(self):
        return self.pixel_mean.device

    def forward(self, batched_inputs):
        """
        Args:
            Same as in :class:`GeneralizedRCNN.forward`

        Returns:
            list[dict]:
                Each dict is the output for one input image.
                The dict contains one key "proposals" whose value is a
                :class:`Instances` with keys "proposal_boxes" and "objectness_logits".
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        features = self.backbone(images.tensor)

        if "instances" in batched_inputs[0]:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
        elif "targets" in batched_inputs[0]:
            log_first_n(
                logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
            )
            gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
        else:
            gt_instances = None
        proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
        # In training, the proposals are not useful at all but we generate them anyway.
        # This makes RPN-only models about 5% slower.
        if self.training:
            return proposal_losses

        processed_results = []
        for results_per_image, input_per_image, image_size in zip(
            proposals, batched_inputs, images.image_sizes
        ):
            height = input_per_image.get("height", image_size[0])
            width = input_per_image.get("width", image_size[1])
            r = detector_postprocess(results_per_image, height, width)
            processed_results.append({"proposals": r})
        return processed_results
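For orientation, a minimal sketch of driving a model like the one above through the batched-inputs contract documented in GeneralizedRCNN.forward. The get_cfg/build_model helpers are assumed to exist in the surrounding detectron2 package (they are not part of this diff), and the dummy input is purely illustrative:

import torch
from detectron2.config import get_cfg        # assumed available outside this diff
from detectron2.modeling import build_model  # assumed: builds the meta-arch named in cfg

cfg = get_cfg()  # any cfg whose MODEL.META_ARCHITECTURE is "GeneralizedRCNN"
model = build_model(cfg)
model.eval()

# One dict per image: "image" is a (C, H, W) tensor; "height"/"width" set the
# output resolution that _postprocess rescales the predictions to.
dummy = {"image": torch.zeros(3, 480, 640), "height": 480, "width": 640}
with torch.no_grad():
    outputs = model([dummy])
print(outputs[0]["instances"])  # Instances with pred_boxes, scores, pred_classes, ...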
detectron2/modeling/meta_arch/retinanet.py  (new file mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
import numpy as np
from typing import List
import torch
from fvcore.nn import sigmoid_focal_loss_jit, smooth_l1_loss
from torch import nn

from detectron2.layers import ShapeSpec, batched_nms, cat
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n

from ..anchor_generator import build_anchor_generator
from ..backbone import build_backbone
from ..box_regression import Box2BoxTransform
from ..matcher import Matcher
from ..postprocessing import detector_postprocess
from .build import META_ARCH_REGISTRY

__all__ = ["RetinaNet"]


def permute_to_N_HWA_K(tensor, K):
    """
    Transpose/reshape a tensor from (N, (A x K), H, W) to (N, (HxWxA), K)
    """
    assert tensor.dim() == 4, tensor.shape
    N, _, H, W = tensor.shape
    tensor = tensor.view(N, -1, K, H, W)
    tensor = tensor.permute(0, 3, 4, 1, 2)
    tensor = tensor.reshape(N, -1, K)  # Size=(N,HWA,K)
    return tensor


def permute_all_cls_and_box_to_N_HWA_K_and_concat(box_cls, box_delta, num_classes=80):
    """
    Rearrange the tensor layout from the network output, i.e.:
    list[Tensor]: #lvl tensors of shape (N, A x K, Hi, Wi)
    to per-image predictions, i.e.:
    Tensor: of shape (N x sum(Hi x Wi x A), K)
    """
    # for each feature level, permute the outputs to make them be in the
    # same format as the labels. Note that the labels are computed for
    # all feature levels concatenated, so we keep the same representation
    # for the objectness and the box_delta
    box_cls_flattened = [permute_to_N_HWA_K(x, num_classes) for x in box_cls]
    box_delta_flattened = [permute_to_N_HWA_K(x, 4) for x in box_delta]
    # concatenate on the first dimension (representing the feature levels), to
    # take into account the way the labels were generated (with all feature maps
    # being concatenated as well)
    box_cls = cat(box_cls_flattened, dim=1).view(-1, num_classes)
    box_delta = cat(box_delta_flattened, dim=1).view(-1, 4)
    return box_cls, box_delta


@META_ARCH_REGISTRY.register()
class RetinaNet(nn.Module):
    """
    Implement RetinaNet in :paper:`RetinaNet`.
    """

    def __init__(self, cfg):
        super().__init__()
        # fmt: off
        self.num_classes              = cfg.MODEL.RETINANET.NUM_CLASSES
        self.in_features              = cfg.MODEL.RETINANET.IN_FEATURES
        # Loss parameters:
        self.focal_loss_alpha         = cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA
        self.focal_loss_gamma         = cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA
        self.smooth_l1_loss_beta      = cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA
        # Inference parameters:
        self.score_threshold          = cfg.MODEL.RETINANET.SCORE_THRESH_TEST
        self.topk_candidates          = cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST
        self.nms_threshold            = cfg.MODEL.RETINANET.NMS_THRESH_TEST
        self.max_detections_per_image = cfg.TEST.DETECTIONS_PER_IMAGE
        # Vis parameters
        self.vis_period               = cfg.VIS_PERIOD
        self.input_format             = cfg.INPUT.FORMAT
        # fmt: on

        self.backbone = build_backbone(cfg)

        backbone_shape = self.backbone.output_shape()
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        self.head = RetinaNetHead(cfg, feature_shapes)
        self.anchor_generator = build_anchor_generator(cfg, feature_shapes)

        # Matching and loss
        self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
        self.matcher = Matcher(
            cfg.MODEL.RETINANET.IOU_THRESHOLDS,
            cfg.MODEL.RETINANET.IOU_LABELS,
            allow_low_quality_matches=True,
        )

        self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
        self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))

        """
        In Detectron1, loss is normalized by number of foreground samples in the batch.
        When batch size is 1 per GPU, #foreground has a large variance and
        using it leads to lower performance. Here we maintain an EMA of #foreground to
        stabilize the normalizer.
        """
        self.loss_normalizer = 100  # initialize with any reasonable #fg that's not too small
        self.loss_normalizer_momentum = 0.9

    @property
    def device(self):
        return self.pixel_mean.device

    def visualize_training(self, batched_inputs, results):
        """
        A function used to visualize ground truth images and final network predictions.
        It shows ground truth bounding boxes on the original image and up to 20
        predicted object bounding boxes on the original image.

        Args:
            batched_inputs (list): a list that contains input to the model.
            results (List[Instances]): a list of #images elements.
        """
        from detectron2.utils.visualizer import Visualizer

        assert len(batched_inputs) == len(
            results
        ), "Cannot visualize inputs and results of different sizes"
        storage = get_event_storage()
        max_boxes = 20

        image_index = 0  # only visualize a single image
        img = batched_inputs[image_index]["image"].cpu().numpy()
        assert img.shape[0] == 3, "Images should have 3 channels."
        if self.input_format == "BGR":
            img = img[::-1, :, :]
        img = img.transpose(1, 2, 0)
        v_gt = Visualizer(img, None)
        v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes)
        anno_img = v_gt.get_image()
        processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])
        predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()

        v_pred = Visualizer(img, None)
        v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])
        prop_img = v_pred.get_image()
        vis_img = np.vstack((anno_img, prop_img))
        vis_img = vis_img.transpose(2, 0, 1)
        vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results"
        storage.put_image(vis_name, vis_img)

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:

                * image: Tensor, image in (C, H, W) format.
                * instances: Instances

                Other information that's included in the original dicts, such as:

                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        images = self.preprocess_image(batched_inputs)
        if "instances" in batched_inputs[0]:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
        elif "targets" in batched_inputs[0]:
            log_first_n(
                logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
            )
            gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
        else:
            gt_instances = None

        features = self.backbone(images.tensor)
        features = [features[f] for f in self.in_features]
        box_cls, box_delta = self.head(features)
        anchors = self.anchor_generator(features)

        if self.training:
            gt_classes, gt_anchors_reg_deltas = self.get_ground_truth(anchors, gt_instances)
            losses = self.losses(gt_classes, gt_anchors_reg_deltas, box_cls, box_delta)

            if self.vis_period > 0:
                storage = get_event_storage()
                if storage.iter % self.vis_period == 0:
                    results = self.inference(box_cls, box_delta, anchors, images.image_sizes)
                    self.visualize_training(batched_inputs, results)

            return losses
        else:
            results = self.inference(box_cls, box_delta, anchors, images.image_sizes)
            processed_results = []
            for results_per_image, input_per_image, image_size in zip(
                results, batched_inputs, images.image_sizes
            ):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                r = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": r})
            return processed_results

    def losses(self, gt_classes, gt_anchors_deltas, pred_class_logits, pred_anchor_deltas):
        """
        Args:
            For `gt_classes` and `gt_anchors_deltas` parameters, see
                :meth:`RetinaNet.get_ground_truth`.
            Their shapes are (N, R) and (N, R, 4), respectively, where R is
            the total number of anchors across levels, i.e. sum(Hi x Wi x A)
            For `pred_class_logits` and `pred_anchor_deltas`, see
                :meth:`RetinaNetHead.forward`.

        Returns:
            dict[str, Tensor]:
                mapping from a named loss to a scalar tensor
                storing the loss. Used during training only. The dict keys are:
                "loss_cls" and "loss_box_reg"
        """
        pred_class_logits, pred_anchor_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat(
            pred_class_logits, pred_anchor_deltas, self.num_classes
        )  # Shapes: (N x R, K) and (N x R, 4), respectively.

        gt_classes = gt_classes.flatten()
        gt_anchors_deltas = gt_anchors_deltas.view(-1, 4)

        valid_idxs = gt_classes >= 0
        foreground_idxs = (gt_classes >= 0) & (gt_classes != self.num_classes)
        num_foreground = foreground_idxs.sum().item()
        get_event_storage().put_scalar("num_foreground", num_foreground)
        self.loss_normalizer = (
            self.loss_normalizer_momentum * self.loss_normalizer
            + (1 - self.loss_normalizer_momentum) * num_foreground
        )

        gt_classes_target = torch.zeros_like(pred_class_logits)
        gt_classes_target[foreground_idxs, gt_classes[foreground_idxs]] = 1

        # logits loss
        loss_cls = sigmoid_focal_loss_jit(
            pred_class_logits[valid_idxs],
            gt_classes_target[valid_idxs],
            alpha=self.focal_loss_alpha,
            gamma=self.focal_loss_gamma,
            reduction="sum",
        ) / max(1, self.loss_normalizer)

        # regression loss
        loss_box_reg = smooth_l1_loss(
            pred_anchor_deltas[foreground_idxs],
            gt_anchors_deltas[foreground_idxs],
            beta=self.smooth_l1_loss_beta,
            reduction="sum",
        ) / max(1, self.loss_normalizer)

        return {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}

    @torch.no_grad()
    def get_ground_truth(self, anchors, targets):
        """
        Args:
            anchors (list[Boxes]): A list of #feature level Boxes.
                The Boxes contains anchors of this image on the specific feature level.
            targets (list[Instances]): a list of N `Instances`s. The i-th
                `Instances` contains the ground-truth per-instance annotations
                for the i-th input image. Specify `targets` during training only.

        Returns:
            gt_classes (Tensor):
                An integer tensor of shape (N, R) storing ground-truth labels for each anchor.
                R is the total number of anchors, i.e. the sum of Hi x Wi x A for all levels.
                Anchors with an IoU with some target higher than the foreground threshold
                are assigned their corresponding label in the [0, K-1] range.
                Anchors whose IoU are below the background threshold are assigned
                the label "K". Anchors whose IoU are between the foreground and background
                thresholds are assigned a label "-1", i.e. ignore.
            gt_anchors_deltas (Tensor):
                Shape (N, R, 4).
                The last dimension represents ground-truth box2box transform
                targets (dx, dy, dw, dh) that map each anchor to its matched ground-truth box.
                The values in the tensor are meaningful only when the corresponding
                anchor is labeled as foreground.
        """
        gt_classes = []
        gt_anchors_deltas = []
        anchors = Boxes.cat(anchors)  # Rx4

        for targets_per_image in targets:
            match_quality_matrix = pairwise_iou(targets_per_image.gt_boxes, anchors)
            gt_matched_idxs, anchor_labels = self.matcher(match_quality_matrix)

            has_gt = len(targets_per_image) > 0
            if has_gt:
                # ground truth box regression
                matched_gt_boxes = targets_per_image.gt_boxes[gt_matched_idxs]
                gt_anchors_reg_deltas_i = self.box2box_transform.get_deltas(
                    anchors.tensor, matched_gt_boxes.tensor
                )

                gt_classes_i = targets_per_image.gt_classes[gt_matched_idxs]
                # Anchors with label 0 are treated as background.
                gt_classes_i[anchor_labels == 0] = self.num_classes
                # Anchors with label -1 are ignored.
                gt_classes_i[anchor_labels == -1] = -1
            else:
                gt_classes_i = torch.zeros_like(gt_matched_idxs) + self.num_classes
                gt_anchors_reg_deltas_i = torch.zeros_like(anchors.tensor)

            gt_classes.append(gt_classes_i)
            gt_anchors_deltas.append(gt_anchors_reg_deltas_i)

        return torch.stack(gt_classes), torch.stack(gt_anchors_deltas)

    def inference(self, box_cls, box_delta, anchors, image_sizes):
        """
        Arguments:
            box_cls, box_delta: Same as the output of :meth:`RetinaNetHead.forward`
            anchors (list[Boxes]): A list of #feature level Boxes.
                The Boxes contain anchors of this image on the specific feature level.
            image_sizes (List[torch.Size]): the input image sizes

        Returns:
            results (List[Instances]): a list of #images elements.
        """
        results = []

        box_cls = [permute_to_N_HWA_K(x, self.num_classes) for x in box_cls]
        box_delta = [permute_to_N_HWA_K(x, 4) for x in box_delta]
        # list[Tensor], one per level, each has shape (N, Hi x Wi x A, K or 4)

        for img_idx, image_size in enumerate(image_sizes):
            box_cls_per_image = [box_cls_per_level[img_idx] for box_cls_per_level in box_cls]
            box_reg_per_image = [box_reg_per_level[img_idx] for box_reg_per_level in box_delta]
            results_per_image = self.inference_single_image(
                box_cls_per_image, box_reg_per_image, anchors, tuple(image_size)
            )
            results.append(results_per_image)
        return results

    def inference_single_image(self, box_cls, box_delta, anchors, image_size):
        """
        Single-image inference. Return bounding-box detection results by thresholding
        on scores and applying non-maximum suppression (NMS).

        Arguments:
            box_cls (list[Tensor]): list of #feature levels. Each entry contains
                tensor of size (H x W x A, K)
            box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4.
            anchors (list[Boxes]): list of #feature levels. Each entry contains
                a Boxes object, which contains all the anchors for that
                image in that feature level.
            image_size (tuple(H, W)): a tuple of the image height and width.

        Returns:
            Same as `inference`, but for only one image.
        """
        boxes_all = []
        scores_all = []
        class_idxs_all = []

        # Iterate over every feature level
        for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta, anchors):
            # (HxWxAxK,)
            box_cls_i = box_cls_i.flatten().sigmoid_()

            # Keep top k top scoring indices only.
            num_topk = min(self.topk_candidates, box_reg_i.size(0))
            # torch.sort is actually faster than .topk (at least on GPUs)
            predicted_prob, topk_idxs = box_cls_i.sort(descending=True)
            predicted_prob = predicted_prob[:num_topk]
            topk_idxs = topk_idxs[:num_topk]

            # filter out the proposals with low confidence score
            keep_idxs = predicted_prob > self.score_threshold
            predicted_prob = predicted_prob[keep_idxs]
            topk_idxs = topk_idxs[keep_idxs]

            anchor_idxs = topk_idxs // self.num_classes
            classes_idxs = topk_idxs % self.num_classes

            box_reg_i = box_reg_i[anchor_idxs]
            anchors_i = anchors_i[anchor_idxs]
            # predict boxes
            predicted_boxes = self.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor)

            boxes_all.append(predicted_boxes)
            scores_all.append(predicted_prob)
            class_idxs_all.append(classes_idxs)

        boxes_all, scores_all, class_idxs_all = [
            cat(x) for x in [boxes_all, scores_all, class_idxs_all]
        ]
        keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.nms_threshold)
        keep = keep[: self.max_detections_per_image]

        result = Instances(image_size)
        result.pred_boxes = Boxes(boxes_all[keep])
        result.scores = scores_all[keep]
        result.pred_classes = class_idxs_all[keep]
        return result

    def preprocess_image(self, batched_inputs):
        """
        Normalize, pad and batch the input images.
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        return images


class RetinaNetHead(nn.Module):
    """
    The head used in RetinaNet for object classification and box regression.
    It has two subnets for the two tasks, with a common structure but separate parameters.
    """

    def __init__(self, cfg, input_shape: List[ShapeSpec]):
        super().__init__()
        # fmt: off
        in_channels      = input_shape[0].channels
        num_classes      = cfg.MODEL.RETINANET.NUM_CLASSES
        num_convs        = cfg.MODEL.RETINANET.NUM_CONVS
        prior_prob       = cfg.MODEL.RETINANET.PRIOR_PROB
        num_anchors      = build_anchor_generator(cfg, input_shape).num_cell_anchors
        # fmt: on
        assert (
            len(set(num_anchors)) == 1
        ), "Using different number of anchors between levels is not currently supported!"
        num_anchors = num_anchors[0]

        cls_subnet = []
        bbox_subnet = []
        for _ in range(num_convs):
            cls_subnet.append(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(nn.ReLU())
            bbox_subnet.append(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(nn.ReLU())

        self.cls_subnet = nn.Sequential(*cls_subnet)
        self.bbox_subnet = nn.Sequential(*bbox_subnet)
        self.cls_score = nn.Conv2d(
            in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1
        )
        self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1)

        # Initialization
        for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]:
            for layer in modules.modules():
                if isinstance(layer, nn.Conv2d):
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
                    torch.nn.init.constant_(layer.bias, 0)

        # Use prior in model initialization to improve stability
        bias_value = -(math.log((1 - prior_prob) / prior_prob))
        torch.nn.init.constant_(self.cls_score.bias, bias_value)

    def forward(self, features):
        """
        Arguments:
            features (list[Tensor]): FPN feature map tensors in high to low resolution.
                Each tensor in the list corresponds to a different feature level.

        Returns:
            logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
                The tensor predicts the classification probability
                at each spatial position for each of the A anchors and K object
                classes.
            bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
                The tensor predicts 4-vector (dx,dy,dw,dh) box
                regression values for every anchor. These values are the
                relative offset between the anchor and the ground truth box.
        """
        logits = []
        bbox_reg = []
        for feature in features:
            logits.append(self.cls_score(self.cls_subnet(feature)))
            bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))
        return logits, bbox_reg
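A quick shape check of permute_to_N_HWA_K above may be useful; the import path is assumed from this file's location in the diff, and the N/A/K values are purely illustrative:

import torch
from detectron2.modeling.meta_arch.retinanet import permute_to_N_HWA_K  # path assumed

# The head emits one (N, A*K, Hi, Wi) tensor per FPN level; the loss flattens them.
# With N=2 images, A=9 anchors, K=80 classes on a 10x10 feature map:
x = torch.randn(2, 9 * 80, 10, 10)
flat = permute_to_N_HWA_K(x, K=80)
print(flat.shape)  # torch.Size([2, 900, 80]), i.e. (N, Hi*Wi*A, K)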
detectron2/modeling/meta_arch/semantic_seg.py  (new file mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
from typing import Dict
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.layers import Conv2d, ShapeSpec
from detectron2.structures import ImageList
from detectron2.utils.registry import Registry

from ..backbone import build_backbone
from ..postprocessing import sem_seg_postprocess
from .build import META_ARCH_REGISTRY

__all__ = ["SemanticSegmentor", "SEM_SEG_HEADS_REGISTRY", "SemSegFPNHead", "build_sem_seg_head"]


SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS")
SEM_SEG_HEADS_REGISTRY.__doc__ = """
Registry for semantic segmentation heads, which make semantic segmentation predictions
from feature maps.
"""


@META_ARCH_REGISTRY.register()
class SemanticSegmentor(nn.Module):
    """
    Main class for semantic segmentation architectures.
    """

    def __init__(self, cfg):
        super().__init__()
        self.backbone = build_backbone(cfg)
        self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape())
        self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
        self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))

    @property
    def device(self):
        return self.pixel_mean.device

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:

                * "image": Tensor, image in (C, H, W) format.
                * "sem_seg": semantic segmentation ground truth
                * Other information that's included in the original dicts, such as:
                  "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.

        Returns:
            list[dict]:
                Each dict is the output for one input image.
                The dict contains one key "sem_seg" whose value is a
                Tensor that represents the
                per-pixel segmentation predicted by the head.
                The prediction has shape KxHxW that represents the logits of
                each class for each pixel.
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)

        features = self.backbone(images.tensor)

        if "sem_seg" in batched_inputs[0]:
            targets = [x["sem_seg"].to(self.device) for x in batched_inputs]
            targets = ImageList.from_tensors(
                targets, self.backbone.size_divisibility, self.sem_seg_head.ignore_value
            ).tensor
        else:
            targets = None
        results, losses = self.sem_seg_head(features, targets)

        if self.training:
            return losses

        processed_results = []
        for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):
            height = input_per_image.get("height")
            width = input_per_image.get("width")
            r = sem_seg_postprocess(result, image_size, height, width)
            processed_results.append({"sem_seg": r})
        return processed_results


def build_sem_seg_head(cfg, input_shape):
    """
    Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`.
    """
    name = cfg.MODEL.SEM_SEG_HEAD.NAME
    return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)


@SEM_SEG_HEADS_REGISTRY.register()
class SemSegFPNHead(nn.Module):
    """
    A semantic segmentation head described in :paper:`PanopticFPN`.
    It takes FPN features as input and merges information from all
    levels of the FPN into single output.
    """

    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super().__init__()

        # fmt: off
        self.in_features      = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
        feature_strides       = {k: v.stride for k, v in input_shape.items()}
        feature_channels      = {k: v.channels for k, v in input_shape.items()}
        self.ignore_value     = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
        num_classes           = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
        conv_dims             = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        self.common_stride    = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE
        norm                  = cfg.MODEL.SEM_SEG_HEAD.NORM
        self.loss_weight      = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT
        # fmt: on

        self.scale_heads = []
        for in_feature in self.in_features:
            head_ops = []
            head_length = max(
                1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride))
            )
            for k in range(head_length):
                norm_module = nn.GroupNorm(32, conv_dims) if norm == "GN" else None
                conv = Conv2d(
                    feature_channels[in_feature] if k == 0 else conv_dims,
                    conv_dims,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=not norm,
                    norm=norm_module,
                    activation=F.relu,
                )
                weight_init.c2_msra_fill(conv)
                head_ops.append(conv)
                if feature_strides[in_feature] != self.common_stride:
                    head_ops.append(
                        nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
                    )
            self.scale_heads.append(nn.Sequential(*head_ops))
            self.add_module(in_feature, self.scale_heads[-1])
        self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
        weight_init.c2_msra_fill(self.predictor)

    def forward(self, features, targets=None):
        """
        Returns:
            In training, returns (None, dict of losses)
            In inference, returns (CxHxW logits, {})
        """
        x = self.layers(features)
        if self.training:
            return None, self.losses(x, targets)
        else:
            x = F.interpolate(
                x, scale_factor=self.common_stride, mode="bilinear", align_corners=False
            )
            return x, {}

    def layers(self, features):
        for i, f in enumerate(self.in_features):
            if i == 0:
                x = self.scale_heads[i](features[f])
            else:
                x = x + self.scale_heads[i](features[f])
        x = self.predictor(x)
        return x

    def losses(self, predictions, targets):
        predictions = F.interpolate(
            predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
        )
        loss = F.cross_entropy(
            predictions, targets, reduction="mean", ignore_index=self.ignore_value
        )
        losses = {"loss_sem_seg": loss * self.loss_weight}
        return losses
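The per-level depth of SemSegFPNHead's scale heads follows directly from the stride arithmetic in its __init__ above. A small sketch with typical FPN strides (the stride values and common_stride are assumed here, not taken from this commit):

import numpy as np

# SemSegFPNHead builds max(1, log2(stride) - log2(common_stride)) conv (+2x upsample)
# stages per input level, so every level ends up at the common output stride.
feature_strides = {"p2": 4, "p3": 8, "p4": 16, "p5": 32}  # assumed typical FPN strides
common_stride = 4
for name, stride in feature_strides.items():
    head_length = max(1, int(np.log2(stride) - np.log2(common_stride)))
    print(name, head_length)  # p2: 1, p3: 1, p4: 2, p5: 3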
detectron2/modeling/poolers.py  (new file mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import sys
import torch
from torch import nn
from torchvision.ops import RoIPool

from detectron2.layers import ROIAlign, ROIAlignRotated, cat

__all__ = ["ROIPooler"]


def assign_boxes_to_levels(box_lists, min_level, max_level, canonical_box_size, canonical_level):
    """
    Map each box in `box_lists` to a feature map level index and return the assignment
    vector.

    Args:
        box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes,
            where N is the number of images in the batch.
        min_level (int): Smallest feature map level index. The input is considered index 0,
            the output of stage 1 is index 1, and so on.
        max_level (int): Largest feature map level index.
        canonical_box_size (int): A canonical box size in pixels (sqrt(box area)).
        canonical_level (int): The feature map level index on which a canonically-sized box
            should be placed.

    Returns:
        A tensor of length M, where M is the total number of boxes aggregated over all
            N batch images. The memory layout corresponds to the concatenation of boxes
            from all images. Each element is the feature map index, as an offset from
            `self.min_level`, for the corresponding box (so value i means the box is at
            `self.min_level + i`).
    """
    eps = sys.float_info.epsilon
    box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists]))
    # Eqn.(1) in FPN paper
    level_assignments = torch.floor(
        canonical_level + torch.log2(box_sizes / canonical_box_size + eps)
    )
    # clamp level to (min, max), in case the box size is too large or too small
    # for the available feature maps
    level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level)
    return level_assignments.to(torch.int64) - min_level


def convert_boxes_to_pooler_format(box_lists):
    """
    Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops
    (see description under Returns).

    Args:
        box_lists (list[Boxes] | list[RotatedBoxes]):
            A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.

    Returns:
        When input is list[Boxes]:
            A tensor of shape (M, 5), where M is the total number of boxes aggregated over all
            N batch images.
            The 5 columns are (batch index, x0, y0, x1, y1), where batch index
            is the index in [0, N) identifying which batch image the box with corners at
            (x0, y0, x1, y1) comes from.
        When input is list[RotatedBoxes]:
            A tensor of shape (M, 6), where M is the total number of boxes aggregated over all
            N batch images.
            The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees),
            where batch index is the index in [0, N) identifying which batch image the
            rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from.
    """

    def fmt_box_list(box_tensor, batch_index):
        repeated_index = torch.full(
            (len(box_tensor), 1), batch_index, dtype=box_tensor.dtype, device=box_tensor.device
        )
        return cat((repeated_index, box_tensor), dim=1)

    pooler_fmt_boxes = cat(
        [fmt_box_list(box_list.tensor, i) for i, box_list in enumerate(box_lists)], dim=0
    )

    return pooler_fmt_boxes


class ROIPooler(nn.Module):
    """
    Region of interest feature map pooler that supports pooling from one or more
    feature maps.
    """

    def __init__(
        self,
        output_size,
        scales,
        sampling_ratio,
        pooler_type,
        canonical_box_size=224,
        canonical_level=4,
    ):
        """
        Args:
            output_size (int, tuple[int] or list[int]): output size of the pooled region,
                e.g., 14 x 14. If tuple or list is given, the length must be 2.
            scales (list[float]): The scale for each low-level pooling op relative to
                the input image. For a feature map with stride s relative to the input
                image, scale is defined as a 1 / s. The stride must be power of 2.
                When there are multiple scales, they must form a pyramid, i.e. they must be
                a monotonically decreasing geometric sequence with a factor of 1/2.
            sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op.
            pooler_type (string): Name of the type of pooling operation that should be applied.
                For instance, "ROIPool" or "ROIAlignV2".
            canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default
                is heuristically defined as 224 pixels in the FPN paper (based on ImageNet
                pre-training).
            canonical_level (int): The feature map level index from which a canonically-sized box
                should be placed. The default is defined as level 4 (stride=16) in the FPN paper,
                i.e., a box of size 224x224 will be placed on the feature with stride=16.
                The box placement for all boxes will be determined from their sizes w.r.t
                canonical_box_size. For example, a box whose area is 4x that of a canonical box
                should be used to pool features from feature level ``canonical_level+1``.

                Note that the actual input feature maps given to this module may not have
                sufficiently many levels for the input boxes. If the boxes are too large or too
                small for the input feature maps, the closest level will be used.
        """
        super().__init__()

        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert len(output_size) == 2
        assert isinstance(output_size[0], int) and isinstance(output_size[1], int)
        self.output_size = output_size

        if pooler_type == "ROIAlign":
            self.level_poolers = nn.ModuleList(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False
                )
                for scale in scales
            )
        elif pooler_type == "ROIAlignV2":
            self.level_poolers = nn.ModuleList(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True
                )
                for scale in scales
            )
        elif pooler_type == "ROIPool":
            self.level_poolers = nn.ModuleList(
                RoIPool(output_size, spatial_scale=scale) for scale in scales
            )
        elif pooler_type == "ROIAlignRotated":
            self.level_poolers = nn.ModuleList(
                ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)
                for scale in scales
            )
        else:
            raise ValueError("Unknown pooler type: {}".format(pooler_type))

        # Map scale (defined as 1 / stride) to its feature map level under the
        # assumption that stride is a power of 2.
        min_level = -(math.log2(scales[0]))
        max_level = -(math.log2(scales[-1]))
        assert math.isclose(min_level, int(min_level)) and math.isclose(
            max_level, int(max_level)
        ), "Featuremap stride is not power of 2!"
        self.min_level = int(min_level)
        self.max_level = int(max_level)
        assert (
            len(scales) == self.max_level - self.min_level + 1
        ), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!"
        assert 0 < self.min_level and self.min_level <= self.max_level
        self.canonical_level = canonical_level
        assert canonical_box_size > 0
        self.canonical_box_size = canonical_box_size

    def forward(self, x, box_lists):
        """
        Args:
            x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those
                used to construct this module.
            box_lists (list[Boxes] | list[RotatedBoxes]):
                A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.
                The box coordinates are defined on the original image and
                will be scaled by the `scales` argument of :class:`ROIPooler`.

        Returns:
            Tensor:
                A tensor of shape (M, C, output_size, output_size) where M is the total number of
                boxes aggregated over all N batch images and C is the number of channels in `x`.
        """
        num_level_assignments = len(self.level_poolers)

        assert isinstance(x, list) and isinstance(
            box_lists, list
        ), "Arguments to pooler must be lists"
        assert (
            len(x) == num_level_assignments
        ), "unequal value, num_level_assignments={}, but x is list of {} Tensors".format(
            num_level_assignments, len(x)
        )

        assert len(box_lists) == x[0].size(
            0
        ), "unequal value, x[0] batch dim 0 is {}, but box_list has length {}".format(
            x[0].size(0), len(box_lists)
        )

        pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)

        if num_level_assignments == 1:
            return self.level_poolers[0](x[0], pooler_fmt_boxes)

        level_assignments = assign_boxes_to_levels(
            box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level
        )

        num_boxes = len(pooler_fmt_boxes)
        num_channels = x[0].shape[1]
        output_size = self.output_size[0]

        dtype, device = x[0].dtype, x[0].device
        output = torch.zeros(
            (num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device
        )

        for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)):
            inds = torch.nonzero(level_assignments == level, as_tuple=True)[0]
            pooler_fmt_boxes_level = pooler_fmt_boxes[inds]
            output[inds] = pooler(x_level, pooler_fmt_boxes_level)

        return output
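assign_boxes_to_levels above implements Eqn.(1) of the FPN paper; a standalone sketch of the same arithmetic follows (the [2, 5] level range is an assumed typical setting, not read from this commit):

import math

# level = floor(canonical_level + log2(sqrt(box_area) / canonical_box_size)),
# then clamped to [min_level, max_level], as in assign_boxes_to_levels.
def level_for(box_area, canonical_box_size=224, canonical_level=4, min_level=2, max_level=5):
    lvl = math.floor(canonical_level + math.log2(math.sqrt(box_area) / canonical_box_size))
    return min(max(lvl, min_level), max_level)

print(level_for(224 * 224))  # 4: a canonically sized box stays on the canonical level
print(level_for(448 * 448))  # 5: 4x the area moves one level up
print(level_for(32 * 32))    # 2: small boxes are clamped to the finest available level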
detectron2/modeling/postprocessing.py  (new file mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torch.nn import functional as F

from detectron2.layers import paste_masks_in_image
from detectron2.structures import Instances
from detectron2.utils.memory import retry_if_cuda_oom


def detector_postprocess(results, output_height, output_width, mask_threshold=0.5):
    """
    Resize the output instances.
    The input images are often resized when entering an object detector.
    As a result, we often need the outputs of the detector in a different
    resolution from its inputs.

    This function will resize the raw outputs of an R-CNN detector
    to produce outputs according to the desired output resolution.

    Args:
        results (Instances): the raw outputs from the detector.
            `results.image_size` contains the input image resolution the detector sees.
            This object might be modified in-place.
        output_height, output_width: the desired output resolution.

    Returns:
        Instances: the resized output from the model, based on the output resolution
    """
    scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0])
    results = Instances((output_height, output_width), **results.get_fields())

    if results.has("pred_boxes"):
        output_boxes = results.pred_boxes
    elif results.has("proposal_boxes"):
        output_boxes = results.proposal_boxes

    output_boxes.scale(scale_x, scale_y)
    output_boxes.clip(results.image_size)

    results = results[output_boxes.nonempty()]

    if results.has("pred_masks"):
        results.pred_masks = retry_if_cuda_oom(paste_masks_in_image)(
            results.pred_masks[:, 0, :, :],  # N, 1, M, M
            results.pred_boxes,
            results.image_size,
            threshold=mask_threshold,
        )

    if results.has("pred_keypoints"):
        results.pred_keypoints[:, :, 0] *= scale_x
        results.pred_keypoints[:, :, 1] *= scale_y

    return results


def sem_seg_postprocess(result, img_size, output_height, output_width):
    """
    Return semantic segmentation predictions in the original resolution.

    The input images are often resized when entering the semantic segmentor. Moreover, in some
    cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
    As a result, we often need the predictions of the segmentor in a different
    resolution from its inputs.

    Args:
        result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
            where C is the number of classes, and H, W are the height and width of the prediction.
        img_size (tuple): image size that segmentor is taking as input.
        output_height, output_width: the desired output resolution.

    Returns:
        semantic segmentation prediction (Tensor): A tensor of the shape
            (C, output_height, output_width) that contains per-pixel soft predictions.
    """
    result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
    result = F.interpolate(
        result, size=(output_height, output_width), mode="bilinear", align_corners=False
    )[0]
    return result
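A minimal usage sketch for detector_postprocess above, rescaling a single box from the resized input resolution back to the original image size. The import path is taken from this diff; the image sizes and box values are illustrative only:

import torch
from detectron2.structures import Boxes, Instances
from detectron2.modeling.postprocessing import detector_postprocess  # path from this diff

# The detector ran on an 800x1067 resized image; the original image was 480x640.
raw = Instances((800, 1067))
raw.pred_boxes = Boxes(torch.tensor([[100.0, 100.0, 300.0, 200.0]]))
raw.scores = torch.tensor([0.9])
raw.pred_classes = torch.tensor([0])

out = detector_postprocess(raw, output_height=480, output_width=640)
print(out.pred_boxes.tensor)  # box rescaled by (640/1067, 480/800), roughly (0.6, 0.6)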
detectron2/modeling/proposal_generator/__init__.py  (new file mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator
from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN
detectron2/modeling/proposal_generator/build.py  (new file mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.utils.registry import Registry

PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR")
PROPOSAL_GENERATOR_REGISTRY.__doc__ = """
Registry for proposal generator, which produces object proposals from feature maps.

The registered object will be called with `obj(cfg, input_shape)`.
The call should return a `nn.Module` object.
"""

from . import rpn, rrpn  # noqa F401 isort:skip


def build_proposal_generator(cfg, input_shape):
    """
    Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`.
    The name can be "PrecomputedProposals" to use no proposal generator.
    """
    name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
    if name == "PrecomputedProposals":
        return None

    return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape)
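Per the registry docstring above, a registered proposal generator is constructed as obj(cfg, input_shape) and is expected to behave like RPN (returning proposals and losses). A hedged sketch of registering a custom one; the class name and its no-op forward are hypothetical:

from torch import nn
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY


@PROPOSAL_GENERATOR_REGISTRY.register()
class MyProposalGenerator(nn.Module):  # hypothetical name
    def __init__(self, cfg, input_shape):
        super().__init__()

    def forward(self, images, features, gt_instances=None):
        # Mirror RPN's contract: return (list[Instances] proposals, dict losses).
        return [], {}

# Selected at build time via cfg.MODEL.PROPOSAL_GENERATOR.NAME = "MyProposalGenerator"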
detectron2/modeling/proposal_generator/proposal_utils.py  (new file mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import torch

from detectron2.structures import Instances


def add_ground_truth_to_proposals(gt_boxes, proposals):
    """
    Call `add_ground_truth_to_proposals_single_image` for all images.

    Args:
        gt_boxes(list[Boxes]): list of N elements. Element i is a Boxes
            representing the ground-truth for image i.
        proposals (list[Instances]): list of N elements. Element i is an Instances
            representing the proposals for image i.

    Returns:
        list[Instances]: list of N Instances. Each is the proposals for the image,
            with field "proposal_boxes" and "objectness_logits".
    """
    assert gt_boxes is not None

    assert len(proposals) == len(gt_boxes)
    if len(proposals) == 0:
        return proposals

    return [
        add_ground_truth_to_proposals_single_image(gt_boxes_i, proposals_i)
        for gt_boxes_i, proposals_i in zip(gt_boxes, proposals)
    ]


def add_ground_truth_to_proposals_single_image(gt_boxes, proposals):
    """
    Augment `proposals` with ground-truth boxes from `gt_boxes`.

    Args:
        Same as `add_ground_truth_to_proposals`, but with gt_boxes and proposals
        per image.

    Returns:
        Same as `add_ground_truth_to_proposals`, but for only one image.
    """
    device = proposals.objectness_logits.device
    # Concatenating gt_boxes with proposals requires them to have the same fields
    # Assign all ground-truth boxes an objectness logit corresponding to P(object) \approx 1.
    gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))

    gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device)
    gt_proposal = Instances(proposals.image_size)

    gt_proposal.proposal_boxes = gt_boxes
    gt_proposal.objectness_logits = gt_logits
    new_proposals = Instances.cat([proposals, gt_proposal])

    return new_proposals
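The objectness logit assigned to appended ground-truth boxes is chosen so that its sigmoid is effectively 1; a one-line check of that value:

import math
import torch

gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
print(gt_logit_value)                                # roughly 23.03
print(torch.sigmoid(torch.tensor(gt_logit_value)))   # roughly 1.0, i.e. P(object) ~ 1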
detectron2/modeling/proposal_generator/rpn.py  (new file mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from
typing
import
Dict
,
List
import
torch
import
torch.nn.functional
as
F
from
torch
import
nn
from
detectron2.config
import
configurable
from
detectron2.layers
import
ShapeSpec
from
detectron2.structures
import
Boxes
,
Instances
,
pairwise_iou
from
detectron2.utils.memory
import
retry_if_cuda_oom
from
detectron2.utils.registry
import
Registry
from
..anchor_generator
import
build_anchor_generator
from
..box_regression
import
Box2BoxTransform
from
..matcher
import
Matcher
from
..sampling
import
subsample_labels
from
.build
import
PROPOSAL_GENERATOR_REGISTRY
from
.rpn_outputs
import
RPNOutputs
,
find_top_rpn_proposals
RPN_HEAD_REGISTRY
=
Registry
(
"RPN_HEAD"
)
RPN_HEAD_REGISTRY
.
__doc__
=
"""
Registry for RPN heads, which take feature maps and perform
objectness classification and bounding box regression for anchors.
The registered object will be called with `obj(cfg, input_shape)`.
The call should return a `nn.Module` object.
"""
def
build_rpn_head
(
cfg
,
input_shape
):
"""
Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`.
"""
name
=
cfg
.
MODEL
.
RPN
.
HEAD_NAME
return
RPN_HEAD_REGISTRY
.
get
(
name
)(
cfg
,
input_shape
)
@
RPN_HEAD_REGISTRY
.
register
()
class
StandardRPNHead
(
nn
.
Module
):
"""
Standard RPN classification and regression heads described in :paper:`Faster R-CNN`.
Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts
objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas
specifying how to deform each anchor into an object proposal.
"""
@
configurable
def
__init__
(
self
,
*
,
in_channels
:
int
,
num_anchors
:
int
,
box_dim
:
int
=
4
):
"""
NOTE: this interface is experimental.
Args:
in_channels (int): number of input feature channels. When using multiple
input features, they must have the same number of channels.
num_anchors (int): number of anchors to predict for *each spatial position*
on the feature map. The total number of anchors for each
feature map will be `num_anchors * H * W`.
box_dim (int): dimension of a box, which is also the number of box regression
predictions to make for each anchor. An axis aligned box has
box_dim=4, while a rotated box has box_dim=5.
"""
super
().
__init__
()
# 3x3 conv for the hidden representation
self
.
conv
=
nn
.
Conv2d
(
in_channels
,
in_channels
,
kernel_size
=
3
,
stride
=
1
,
padding
=
1
)
# 1x1 conv for predicting objectness logits
self
.
objectness_logits
=
nn
.
Conv2d
(
in_channels
,
num_anchors
,
kernel_size
=
1
,
stride
=
1
)
# 1x1 conv for predicting box2box transform deltas
self
.
anchor_deltas
=
nn
.
Conv2d
(
in_channels
,
num_anchors
*
box_dim
,
kernel_size
=
1
,
stride
=
1
)
for
l
in
[
self
.
conv
,
self
.
objectness_logits
,
self
.
anchor_deltas
]:
nn
.
init
.
normal_
(
l
.
weight
,
std
=
0.01
)
nn
.
init
.
constant_
(
l
.
bias
,
0
)
@
classmethod
def
from_config
(
cls
,
cfg
,
input_shape
):
# Standard RPN is shared across levels:
in_channels
=
[
s
.
channels
for
s
in
input_shape
]
assert
len
(
set
(
in_channels
))
==
1
,
"Each level must have the same channel!"
in_channels
=
in_channels
[
0
]
# RPNHead should take the same input as anchor generator
# NOTE: it assumes that creating an anchor generator does not have unwanted side effect.
anchor_generator
=
build_anchor_generator
(
cfg
,
input_shape
)
num_anchors
=
anchor_generator
.
num_anchors
box_dim
=
anchor_generator
.
box_dim
assert
(
len
(
set
(
num_anchors
))
==
1
),
"Each level must have the same number of anchors per spatial position"
return
{
"in_channels"
:
in_channels
,
"num_anchors"
:
num_anchors
[
0
],
"box_dim"
:
box_dim
}
def
forward
(
self
,
features
):
"""
Args:
features (list[Tensor]): list of feature maps
Returns:
list[Tensor]: A list of L elements.
Element i is a tensor of shape (N, A, Hi, Wi) representing
the predicted objectness logits for all anchors. A is the number of cell anchors.
list[Tensor]: A list of L elements. Element i is a tensor of shape
(N, A*box_dim, Hi, Wi) representing the predicted "deltas" used to transform anchors
to proposals.
"""
pred_objectness_logits
=
[]
pred_anchor_deltas
=
[]
for
x
in
features
:
t
=
F
.
relu
(
self
.
conv
(
x
))
pred_objectness_logits
.
append
(
self
.
objectness_logits
(
t
))
pred_anchor_deltas
.
append
(
self
.
anchor_deltas
(
t
))
return
pred_objectness_logits
,
pred_anchor_deltas
@PROPOSAL_GENERATOR_REGISTRY.register()
class RPN(nn.Module):
    """
    Region Proposal Network, introduced by :paper:`Faster R-CNN`.
    """

    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super().__init__()

        # fmt: off
        self.min_box_side_len     = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
        self.in_features          = cfg.MODEL.RPN.IN_FEATURES
        self.nms_thresh           = cfg.MODEL.RPN.NMS_THRESH
        self.batch_size_per_image = cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE
        self.positive_fraction    = cfg.MODEL.RPN.POSITIVE_FRACTION
        self.smooth_l1_beta       = cfg.MODEL.RPN.SMOOTH_L1_BETA
        self.loss_weight          = cfg.MODEL.RPN.LOSS_WEIGHT
        # fmt: on

        # Map from self.training state to train/test settings
        self.pre_nms_topk = {
            True: cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN,
            False: cfg.MODEL.RPN.PRE_NMS_TOPK_TEST,
        }
        self.post_nms_topk = {
            True: cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN,
            False: cfg.MODEL.RPN.POST_NMS_TOPK_TEST,
        }
        self.boundary_threshold = cfg.MODEL.RPN.BOUNDARY_THRESH

        self.anchor_generator = build_anchor_generator(
            cfg, [input_shape[f] for f in self.in_features]
        )
        self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
        self.anchor_matcher = Matcher(
            cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True
        )
        self.rpn_head = build_rpn_head(cfg, [input_shape[f] for f in self.in_features])

    def _subsample_labels(self, label):
        """
        Randomly sample a subset of positive and negative examples, and overwrite
        the label vector to the ignore value (-1) for all elements that are not
        included in the sample.

        Args:
            label (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned.
        """
        pos_idx, neg_idx = subsample_labels(
            label, self.batch_size_per_image, self.positive_fraction, 0
        )
        # Fill with the ignore label (-1), then set positive and negative labels
        label.fill_(-1)
        label.scatter_(0, pos_idx, 1)
        label.scatter_(0, neg_idx, 0)
        return label

    @torch.no_grad()
    def label_and_sample_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]):
        """
        Args:
            anchors (list[Boxes]): anchors for each feature map.
            gt_instances: the ground-truth instances for each image.

        Returns:
            list[Tensor]:
                List of #img tensors. i-th element is a vector of labels whose length is
                the total number of anchors across feature maps. Label values are in {-1, 0, 1},
                with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
            list[Tensor]:
                i-th element is a Nx4 tensor, where N is the total number of anchors across
                feature maps. The values are the matched gt boxes for each anchor.
                Values are undefined for those anchors not labeled as 1.
        """
        anchors = Boxes.cat(anchors)

        gt_boxes = [x.gt_boxes for x in gt_instances]
        image_sizes = [x.image_size for x in gt_instances]
        del gt_instances

        gt_labels = []
        matched_gt_boxes = []
        for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes):
            """
            image_size_i: (h, w) for the i-th image
            gt_boxes_i: ground-truth boxes for i-th image
            """
            match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors)
            matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
            # Matching is memory-expensive and may result in CPU tensors. But the result is small
            gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
            del match_quality_matrix

            if self.boundary_threshold >= 0:
                # Discard anchors that go out of the boundaries of the image
                # NOTE: This is legacy functionality that is turned off by default in Detectron2
                anchors_inside_image = anchors.inside_box(image_size_i, self.boundary_threshold)
                gt_labels_i[~anchors_inside_image] = -1

            # A vector of labels (-1, 0, 1) for each anchor
            gt_labels_i = self._subsample_labels(gt_labels_i)

            if len(gt_boxes_i) == 0:
                # These values won't be used anyway since the anchor is labeled as background
                matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
            else:
                # TODO wasted indexing computation for ignored boxes
                matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor

            gt_labels.append(gt_labels_i)  # N,AHW
            matched_gt_boxes.append(matched_gt_boxes_i)
        return gt_labels, matched_gt_boxes

    def forward(self, images, features, gt_instances=None):
        """
        Args:
            images (ImageList): input images of length `N`
            features (dict[str: Tensor]): input data as a mapping from feature
                map name to tensor. Axis 0 represents the number of images `N` in
                the input data; axes 1-3 are channels, height, and width, which may
                vary between feature maps (e.g., if a feature pyramid is used).
            gt_instances (list[Instances], optional): a length `N` list of `Instances`s.
                Each `Instances` stores ground-truth instances for the corresponding image.

        Returns:
            proposals: list[Instances]: contains fields "proposal_boxes", "objectness_logits"
            loss: dict[Tensor] or None
        """
        features = [features[f] for f in self.in_features]
        pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
        anchors = self.anchor_generator(features)

        if self.training:
            gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances)
        else:
            gt_labels, gt_boxes = None, None

        outputs = RPNOutputs(
            self.box2box_transform,
            self.batch_size_per_image,
            images,
            pred_objectness_logits,
            pred_anchor_deltas,
            anchors,
            gt_labels,
            gt_boxes,
            self.smooth_l1_beta,
        )

        if self.training:
            losses = {k: v * self.loss_weight for k, v in outputs.losses().items()}
        else:
            losses = {}

        with torch.no_grad():
            # Find the top proposals by applying NMS and removing boxes that
            # are too small. The proposals are treated as fixed for approximate
            # joint training with roi heads. This approach ignores the derivative
            # w.r.t. the proposal boxes' coordinates that are also network
            # responses, so is approximate.
            proposals = find_top_rpn_proposals(
                outputs.predict_proposals(),
                outputs.predict_objectness_logits(),
                images,
                self.nms_thresh,
                self.pre_nms_topk[self.training],
                self.post_nms_topk[self.training],
                self.min_box_side_len,
                self.training,
            )

        return proposals, losses
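The -1/0/1 labeling convention enforced by `_subsample_labels` above (everything not sampled becomes "ignore") is easy to reproduce with plain PyTorch. The following toy version is a simplified sketch only: the real work is done by `detectron2.modeling.sampling.subsample_labels`, and this snippet mirrors the bookkeeping, not the exact sampling policy.

# --- illustrative sketch (not part of this file) ---
import torch


def toy_subsample(label, num_samples=8, positive_fraction=0.5):
    pos = torch.nonzero(label == 1, as_tuple=True)[0]
    neg = torch.nonzero(label == 0, as_tuple=True)[0]
    num_pos = min(int(num_samples * positive_fraction), pos.numel())
    num_neg = min(num_samples - num_pos, neg.numel())
    pos_idx = pos[torch.randperm(pos.numel())[:num_pos]]
    neg_idx = neg[torch.randperm(neg.numel())[:num_neg]]
    label.fill_(-1)                # everything not sampled is ignored
    label.scatter_(0, pos_idx, 1)  # keep a capped number of positives
    label.scatter_(0, neg_idx, 0)  # fill the rest of the budget with negatives
    return label


labels = torch.tensor([1, 1, 0, 0, 0, 0, 1, 0, 0, 0])
print(toy_subsample(labels.clone()))  # a few 0s and 1s survive, the rest become -1
# --- end sketch ---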
detectron2/modeling/proposal_generator/rpn_outputs.py  0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import logging
import torch
import torch.nn.functional as F
from fvcore.nn import smooth_l1_loss

from detectron2.layers import batched_nms, cat
from detectron2.structures import Boxes, Instances
from detectron2.utils.events import get_event_storage

logger = logging.getLogger(__name__)

# TODO: comments for future refactoring of this module
#
# From @rbg:
# This code involves a significant amount of tensor reshaping and permuting. Look for
# ways to simplify this.

"""
Shape shorthand in this module:

    N: number of images in the minibatch
    L: number of feature maps per image on which RPN is run
    A: number of cell anchors (must be the same for all feature maps)
    Hi, Wi: height and width of the i-th feature map
    4: size of the box parameterization

Naming convention:

    objectness: refers to the binary classification of an anchor as object vs. not object.

    deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
    transform (see :class:`box_regression.Box2BoxTransform`).

    pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use
        sigmoid(pred_objectness_logits) to estimate P(object).

    gt_labels: ground-truth binary classification labels for objectness

    pred_anchor_deltas: predicted box2box transform deltas

    gt_anchor_deltas: ground-truth box2box transform deltas
"""


def find_top_rpn_proposals(
    proposals,
    pred_objectness_logits,
    images,
    nms_thresh,
    pre_nms_topk,
    post_nms_topk,
    min_box_side_len,
    training,
):
    """
    For each feature map, select the `pre_nms_topk` highest scoring proposals,
    apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
    highest scoring proposals among all the feature maps if `training` is True,
    otherwise, returns the highest `post_nms_topk` scoring proposals for each
    feature map.

    Args:
        proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4).
            All proposal predictions on the feature maps.
        pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
        images (ImageList): Input images as an :class:`ImageList`.
        nms_thresh (float): IoU threshold to use for NMS
        pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
            When RPN is run on multiple feature maps (as in FPN) this number is per
            feature map.
        post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
            When RPN is run on multiple feature maps (as in FPN) this number is total,
            over all feature maps.
        min_box_side_len (float): minimum proposal box side length in pixels (absolute units
            wrt input images).
        training (bool): True if proposals are to be used in training, otherwise False.
            This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
            comment.

    Returns:
        proposals (list[Instances]): list of N Instances. The i-th Instances
            stores post_nms_topk object proposals for image i, sorted by their
            objectness score in descending order.
    """
    image_sizes = images.image_sizes  # in (h, w) order
    num_images = len(image_sizes)
    device = proposals[0].device

    # 1. Select top-k anchor for every level and every image
    topk_scores = []  # #lvl Tensor, each of shape N x topk
    topk_proposals = []
    level_ids = []  # #lvl Tensor, each of shape (topk,)
    batch_idx = torch.arange(num_images, device=device)
    for level_id, proposals_i, logits_i in zip(
        itertools.count(), proposals, pred_objectness_logits
    ):
        Hi_Wi_A = logits_i.shape[1]
        num_proposals_i = min(pre_nms_topk, Hi_Wi_A)

        # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812)
        # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
        logits_i, idx = logits_i.sort(descending=True, dim=1)
        topk_scores_i = logits_i[batch_idx, :num_proposals_i]
        topk_idx = idx[batch_idx, :num_proposals_i]

        # each is N x topk
        topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 4

        topk_proposals.append(topk_proposals_i)
        topk_scores.append(topk_scores_i)
        level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))

    # 2. Concat all levels together
    topk_scores = cat(topk_scores, dim=1)
    topk_proposals = cat(topk_proposals, dim=1)
    level_ids = cat(level_ids, dim=0)

    # 3. For each image, run a per-level NMS, and choose topk results.
    results = []
    for n, image_size in enumerate(image_sizes):
        boxes = Boxes(topk_proposals[n])
        scores_per_img = topk_scores[n]
        lvl = level_ids

        valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
        if not valid_mask.all():
            if training:
                raise FloatingPointError(
                    "Predicted boxes or scores contain Inf/NaN. Training has diverged."
                )
            boxes = boxes[valid_mask]
            scores_per_img = scores_per_img[valid_mask]
            lvl = lvl[valid_mask]
        boxes.clip(image_size)

        # filter empty boxes
        keep = boxes.nonempty(threshold=min_box_side_len)
        if keep.sum().item() != len(boxes):
            boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]

        keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh)
        # In Detectron1, there was different behavior during training vs. testing.
        # (https://github.com/facebookresearch/Detectron/issues/459)
        # During training, topk is over the proposals from *all* images in the training batch.
        # During testing, it is over the proposals for each image separately.
        # As a result, the training behavior becomes batch-dependent,
        # and the configuration "POST_NMS_TOPK_TRAIN" ends up relying on the batch size.
        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
        keep = keep[:post_nms_topk]  # keep is already sorted

        res = Instances(image_size)
        res.proposal_boxes = boxes[keep]
        res.objectness_logits = scores_per_img[keep]
        results.append(res)
    return results


def rpn_losses(
    gt_labels, gt_anchor_deltas, pred_objectness_logits, pred_anchor_deltas, smooth_l1_beta
):
    """
    Args:
        gt_labels (Tensor): shape (N,), each element in {-1, 0, 1} representing
            ground-truth objectness labels with: -1 = ignore; 0 = not object; 1 = object.
        gt_anchor_deltas (Tensor): shape (N, box_dim), row i represents ground-truth
            box2box transform targets (dx, dy, dw, dh) or (dx, dy, dw, dh, da) that map anchor i to
            its matched ground-truth box.
        pred_objectness_logits (Tensor): shape (N,), each element is a predicted objectness
            logit.
        pred_anchor_deltas (Tensor): shape (N, box_dim), each row is a predicted box2box
            transform (dx, dy, dw, dh) or (dx, dy, dw, dh, da)
        smooth_l1_beta (float): The transition point between L1 and L2 loss in
            the smooth L1 loss function. When set to 0, the loss becomes L1. When
            set to +inf, the loss becomes constant 0.

    Returns:
        objectness_loss, localization_loss, both unnormalized (summed over samples).
    """
    pos_masks = gt_labels == 1
    localization_loss = smooth_l1_loss(
        pred_anchor_deltas[pos_masks], gt_anchor_deltas[pos_masks], smooth_l1_beta, reduction="sum"
    )

    valid_masks = gt_labels >= 0
    objectness_loss = F.binary_cross_entropy_with_logits(
        pred_objectness_logits[valid_masks],
        gt_labels[valid_masks].to(torch.float32),
        reduction="sum",
    )
    return objectness_loss, localization_loss


class RPNOutputs(object):
    def __init__(
        self,
        box2box_transform,
        batch_size_per_image,
        images,
        pred_objectness_logits,
        pred_anchor_deltas,
        anchors,
        gt_labels=None,
        gt_boxes=None,
        smooth_l1_beta=0.0,
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for
                anchor-proposal transformations.
            images (ImageList): :class:`ImageList` instance representing N input images
            batch_size_per_image (int): number of proposals to sample when training
            pred_objectness_logits (list[Tensor]): A list of L elements.
                Element i is a tensor of shape (N, A, Hi, Wi) representing
                the predicted objectness logits for anchors.
            pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
                (N, A*4 or 5, Hi, Wi) representing the predicted "deltas" used to transform anchors
                to proposals.
            anchors (list[Boxes or RotatedBoxes]): A list of Boxes/RotatedBoxes storing all
                the anchors for each feature map. See :meth:`AnchorGenerator.forward`.
            gt_labels (list[Tensor]): Available only in training.
                See :meth:`RPN.label_and_sample_anchors`.
            gt_boxes (list[Boxes or RotatedBoxes]): Available only in training.
                See :meth:`RPN.label_and_sample_anchors`.
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
        """
        self.box2box_transform = box2box_transform
        self.batch_size_per_image = batch_size_per_image

        B = anchors[0].tensor.size(1)  # box dimension (4 or 5)
        self.pred_objectness_logits = [
            # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
            score.permute(0, 2, 3, 1).flatten(1)
            for score in pred_objectness_logits
        ]

        self.pred_anchor_deltas = [
            # Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B)
            #          -> (N, Hi*Wi*A, B)
            x.view(x.shape[0], -1, B, x.shape[-2], x.shape[-1])
            .permute(0, 3, 4, 1, 2)
            .flatten(1, -2)
            for x in pred_anchor_deltas
        ]

        self.anchors = anchors
        self.gt_boxes = gt_boxes
        self.gt_labels = gt_labels

        self.num_images = len(images)
        self.smooth_l1_beta = smooth_l1_beta

    def losses(self):
        """
        Return the losses from a set of RPN predictions and their associated ground-truth.

        Returns:
            dict[loss name -> loss value]: A dict mapping from loss name to loss value.
                Loss names are: `loss_rpn_cls` for objectness classification and
                `loss_rpn_loc` for proposal localization.
        """
        gt_labels = torch.stack(self.gt_labels)
        anchors = self.anchors[0].cat(self.anchors).tensor  # Ax(4 or 5)
        gt_anchor_deltas = [self.box2box_transform.get_deltas(anchors, k) for k in self.gt_boxes]
        gt_anchor_deltas = torch.stack(gt_anchor_deltas)

        # Log the number of positive/negative anchors per-image that's used in training
        num_pos_anchors = (gt_labels == 1).sum().item()
        num_neg_anchors = (gt_labels == 0).sum().item()
        storage = get_event_storage()
        storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / self.num_images)
        storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / self.num_images)

        objectness_loss, localization_loss = rpn_losses(
            gt_labels,
            gt_anchor_deltas,
            # concat on the Hi*Wi*A dimension
            cat(self.pred_objectness_logits, dim=1),
            cat(self.pred_anchor_deltas, dim=1),
            self.smooth_l1_beta,
        )
        normalizer = self.batch_size_per_image * self.num_images
        return {
            "loss_rpn_cls": objectness_loss / normalizer,
            "loss_rpn_loc": localization_loss / normalizer,
        }

    def predict_proposals(self):
        """
        Transform anchors into proposals by applying the predicted anchor deltas.

        Returns:
            proposals (list[Tensor]): A list of L tensors. Tensor i has shape
                (N, Hi*Wi*A, B), where B is box dimension (4 or 5).
        """
        proposals = []
        # For each feature map
        for anchors_i, pred_anchor_deltas_i in zip(self.anchors, self.pred_anchor_deltas):
            B = anchors_i.tensor.size(1)
            N = self.num_images
            pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B)
            # Expand anchors to shape (N*Hi*Wi*A, B)
            anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B)
            proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i)
            # Append feature map proposals with shape (N, Hi*Wi*A, B)
            proposals.append(proposals_i.view(N, -1, B))
        return proposals

    def predict_objectness_logits(self):
        """
        Return objectness logits in the same format as the proposals returned by
        :meth:`predict_proposals`.

        Returns:
            pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape
                (N, Hi*Wi*A).
        """
        return self.pred_objectness_logits
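The reshaping in `RPNOutputs.__init__` is the part @rbg's TODO above refers to. A small self-contained sketch in plain PyTorch shows the (N, A*B, Hi, Wi) -> (N, Hi*Wi*A, B) transformation and checks that the B regression values of a single anchor stay contiguous; the dimensions below are arbitrary.

# --- illustrative sketch (not part of this file) ---
import torch

N, A, B, H, W = 2, 3, 4, 5, 6
x = torch.arange(N * A * B * H * W, dtype=torch.float32).view(N, A * B, H, W)
flat = (
    x.view(N, A, B, H, W)    # split the channel axis into (A, B)
    .permute(0, 3, 4, 1, 2)  # (N, H, W, A, B)
    .flatten(1, -2)          # (N, H*W*A, B)
)
assert flat.shape == (N, H * W * A, B)
# The B delta values of the first anchor at the top-left location stay together:
assert torch.equal(flat[0, 0], x[0, :B, 0, 0])
# --- end sketch ---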
detectron2/modeling/proposal_generator/rrpn.py  0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import logging
from typing import Dict, List
import torch

from detectron2.layers import ShapeSpec, batched_nms_rotated, cat
from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
from detectron2.utils.memory import retry_if_cuda_oom

from ..box_regression import Box2BoxTransformRotated
from .build import PROPOSAL_GENERATOR_REGISTRY
from .rpn import RPN
from .rpn_outputs import RPNOutputs

logger = logging.getLogger(__name__)


def find_top_rrpn_proposals(
    proposals,
    pred_objectness_logits,
    images,
    nms_thresh,
    pre_nms_topk,
    post_nms_topk,
    min_box_side_len,
    training,
):
    """
    For each feature map, select the `pre_nms_topk` highest scoring proposals,
    apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
    highest scoring proposals among all the feature maps if `training` is True,
    otherwise, returns the highest `post_nms_topk` scoring proposals for each
    feature map.

    Args:
        proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5).
            All proposal predictions on the feature maps.
        pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
        images (ImageList): Input images as an :class:`ImageList`.
        nms_thresh (float): IoU threshold to use for NMS
        pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
            When RRPN is run on multiple feature maps (as in FPN) this number is per
            feature map.
        post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
            When RRPN is run on multiple feature maps (as in FPN) this number is total,
            over all feature maps.
        min_box_side_len (float): minimum proposal box side length in pixels (absolute units
            wrt input images).
        training (bool): True if proposals are to be used in training, otherwise False.
            This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
            comment.

    Returns:
        proposals (list[Instances]): list of N Instances. The i-th Instances
            stores post_nms_topk object proposals for image i.
    """
    image_sizes = images.image_sizes  # in (h, w) order
    num_images = len(image_sizes)
    device = proposals[0].device

    # 1. Select top-k anchor for every level and every image
    topk_scores = []  # #lvl Tensor, each of shape N x topk
    topk_proposals = []
    level_ids = []  # #lvl Tensor, each of shape (topk,)
    batch_idx = torch.arange(num_images, device=device)
    for level_id, proposals_i, logits_i in zip(
        itertools.count(), proposals, pred_objectness_logits
    ):
        Hi_Wi_A = logits_i.shape[1]
        num_proposals_i = min(pre_nms_topk, Hi_Wi_A)

        # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812)
        # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
        logits_i, idx = logits_i.sort(descending=True, dim=1)
        topk_scores_i = logits_i[batch_idx, :num_proposals_i]
        topk_idx = idx[batch_idx, :num_proposals_i]

        # each is N x topk
        topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 5

        topk_proposals.append(topk_proposals_i)
        topk_scores.append(topk_scores_i)
        level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))

    # 2. Concat all levels together
    topk_scores = cat(topk_scores, dim=1)
    topk_proposals = cat(topk_proposals, dim=1)
    level_ids = cat(level_ids, dim=0)

    # 3. For each image, run a per-level NMS, and choose topk results.
    results = []
    for n, image_size in enumerate(image_sizes):
        boxes = RotatedBoxes(topk_proposals[n])
        scores_per_img = topk_scores[n]
        valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
        if not valid_mask.all():
            boxes = boxes[valid_mask]
            scores_per_img = scores_per_img[valid_mask]
        boxes.clip(image_size)

        # filter empty boxes
        keep = boxes.nonempty(threshold=min_box_side_len)
        lvl = level_ids
        if keep.sum().item() != len(boxes):
            boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], level_ids[keep])

        keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh)
        # In Detectron1, there was different behavior during training vs. testing.
        # (https://github.com/facebookresearch/Detectron/issues/459)
        # During training, topk is over the proposals from *all* images in the training batch.
        # During testing, it is over the proposals for each image separately.
        # As a result, the training behavior becomes batch-dependent,
        # and the configuration "POST_NMS_TOPK_TRAIN" ends up relying on the batch size.
        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
        keep = keep[:post_nms_topk]

        res = Instances(image_size)
        res.proposal_boxes = boxes[keep]
        res.objectness_logits = scores_per_img[keep]
        results.append(res)
    return results


@PROPOSAL_GENERATOR_REGISTRY.register()
class RRPN(RPN):
    """
    Rotated Region Proposal Network described in :paper:`RRPN`.
    """

    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super().__init__(cfg, input_shape)
        self.box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
        if self.boundary_threshold >= 0:
            raise NotImplementedError(
                "boundary_threshold is a legacy option not implemented for RRPN."
            )

    @torch.no_grad()
    def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]):
        """
        Args:
            anchors (list[RotatedBoxes]): anchors for each feature map.
            gt_instances: the ground-truth instances for each image.

        Returns:
            list[Tensor]:
                List of #img tensors. i-th element is a vector of labels whose length is
                the total number of anchors across feature maps. Label values are in {-1, 0, 1},
                with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
            list[Tensor]:
                i-th element is a Nx5 tensor, where N is the total number of anchors across
                feature maps. The values are the matched gt boxes for each anchor.
                Values are undefined for those anchors not labeled as 1.
        """
        anchors = RotatedBoxes.cat(anchors)

        gt_boxes = [x.gt_boxes for x in gt_instances]
        del gt_instances

        gt_labels = []
        matched_gt_boxes = []
        for gt_boxes_i in gt_boxes:
            """
            gt_boxes_i: ground-truth boxes for i-th image
            """
            match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors)
            matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
            # Matching is memory-expensive and may result in CPU tensors. But the result is small
            gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)

            # A vector of labels (-1, 0, 1) for each anchor
            gt_labels_i = self._subsample_labels(gt_labels_i)

            if len(gt_boxes_i) == 0:
                # These values won't be used anyway since the anchor is labeled as background
                matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
            else:
                # TODO wasted indexing computation for ignored boxes
                matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor

            gt_labels.append(gt_labels_i)  # N,AHW
            matched_gt_boxes.append(matched_gt_boxes_i)
        return gt_labels, matched_gt_boxes

    def forward(self, images, features, gt_instances=None):
        # same signature as RPN.forward
        features = [features[f] for f in self.in_features]
        pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
        anchors = self.anchor_generator(features)

        if self.training:
            gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances)
        else:
            gt_labels, gt_boxes = None, None

        outputs = RPNOutputs(
            self.box2box_transform,
            self.batch_size_per_image,
            images,
            pred_objectness_logits,
            pred_anchor_deltas,
            anchors,
            gt_labels,
            gt_boxes,
            self.smooth_l1_beta,
        )

        if self.training:
            losses = {k: v * self.loss_weight for k, v in outputs.losses().items()}
        else:
            losses = {}

        with torch.no_grad():
            # Find the top proposals by applying NMS and removing boxes that
            # are too small. The proposals are treated as fixed for approximate
            # joint training with roi heads. This approach ignores the derivative
            # w.r.t. the proposal boxes' coordinates that are also network
            # responses, so is approximate.
            # Note: this line is the only difference v.s. RPN.forward
            proposals = find_top_rrpn_proposals(
                outputs.predict_proposals(),
                outputs.predict_objectness_logits(),
                images,
                self.nms_thresh,
                self.pre_nms_topk[self.training],
                self.post_nms_topk[self.training],
                self.min_box_side_len,
                self.training,
            )

        return proposals, losses
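For readers new to the rotated variant, the only representational difference from RPN is the 5-d box. A minimal sketch of constructing a rotated box, assuming detectron2's RotatedBoxes convention of (x_center, y_center, width, height, angle) with the angle in degrees, counter-clockwise:

# --- illustrative sketch (not part of this file) ---
import torch
from detectron2.structures import RotatedBoxes

boxes = RotatedBoxes(torch.tensor([[50.0, 40.0, 20.0, 10.0, 30.0]]))
print(boxes.tensor.shape)  # torch.Size([1, 5]); box_dim=5 flows through the head and deltas
# --- end sketch ---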
detectron2/modeling/roi_heads/__init__.py  0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head
from .keypoint_head import ROI_KEYPOINT_HEAD_REGISTRY, build_keypoint_head, BaseKeypointRCNNHead
from .mask_head import ROI_MASK_HEAD_REGISTRY, build_mask_head, BaseMaskRCNNHead
from .roi_heads import (
    ROI_HEADS_REGISTRY,
    ROIHeads,
    Res5ROIHeads,
    StandardROIHeads,
    build_roi_heads,
    select_foreground_proposals,
)
from .rotated_fast_rcnn import RROIHeads
from .fast_rcnn import FastRCNNOutputLayers

from . import cascade_rcnn  # isort:skip
detectron2/modeling/roi_heads/box_head.py  0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
from typing import List
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import Conv2d, Linear, ShapeSpec, get_norm
from detectron2.utils.registry import Registry

ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD")
ROI_BOX_HEAD_REGISTRY.__doc__ = """
Registry for box heads, which make box predictions from per-region features.

The registered object will be called with `obj(cfg, input_shape)`.
"""


@ROI_BOX_HEAD_REGISTRY.register()
class FastRCNNConvFCHead(nn.Module):
    """
    A head with several 3x3 conv layers (each followed by norm & relu) and then
    several fc layers (each followed by relu).
    """

    @configurable
    def __init__(
        self, input_shape: ShapeSpec, *, conv_dims: List[int], fc_dims: List[int], conv_norm=""
    ):
        """
        NOTE: this interface is experimental.

        Args:
            input_shape (ShapeSpec): shape of the input feature.
            conv_dims (list[int]): the output dimensions of the conv layers
            fc_dims (list[int]): the output dimensions of the fc layers
            conv_norm (str or callable): normalization for the conv layers.
                See :func:`detectron2.layers.get_norm` for supported types.
        """
        super().__init__()
        assert len(conv_dims) + len(fc_dims) > 0

        self._output_size = (input_shape.channels, input_shape.height, input_shape.width)

        self.conv_norm_relus = []
        for k, conv_dim in enumerate(conv_dims):
            conv = Conv2d(
                self._output_size[0],
                conv_dim,
                kernel_size=3,
                padding=1,
                bias=not conv_norm,
                norm=get_norm(conv_norm, conv_dim),
                activation=F.relu,
            )
            self.add_module("conv{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (conv_dim, self._output_size[1], self._output_size[2])

        self.fcs = []
        for k, fc_dim in enumerate(fc_dims):
            fc = Linear(np.prod(self._output_size), fc_dim)
            self.add_module("fc{}".format(k + 1), fc)
            self.fcs.append(fc)
            self._output_size = fc_dim

        for layer in self.conv_norm_relus:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)

    @classmethod
    def from_config(cls, cfg, input_shape):
        num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV
        conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM
        num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
        fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
        return {
            "input_shape": input_shape,
            "conv_dims": [conv_dim] * num_conv,
            "fc_dims": [fc_dim] * num_fc,
            "conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM,
        }

    def forward(self, x):
        for layer in self.conv_norm_relus:
            x = layer(x)
        if len(self.fcs):
            if x.dim() > 2:
                x = torch.flatten(x, start_dim=1)
            for layer in self.fcs:
                x = F.relu(layer(x))
        return x

    @property
    def output_shape(self):
        """
        Returns:
            ShapeSpec: the output feature shape
        """
        o = self._output_size
        if isinstance(o, int):
            return ShapeSpec(channels=o)
        else:
            return ShapeSpec(channels=o[0], height=o[1], width=o[2])


def build_box_head(cfg, input_shape):
    """
    Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`.
    """
    name = cfg.MODEL.ROI_BOX_HEAD.NAME
    return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape)
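A minimal sketch of using the head standalone, assuming the `@configurable` decorator lets the class be built with explicit arguments instead of a cfg. The 7x7 pooled size, 256 channels, and two 1024-d fc layers below are assumptions for illustration (they mirror common defaults, but nothing here reads the real config).

# --- illustrative sketch (not part of this file) ---
import torch
from detectron2.layers import ShapeSpec

head = FastRCNNConvFCHead(
    ShapeSpec(channels=256, height=7, width=7), conv_dims=[], fc_dims=[1024, 1024]
)
x = torch.randn(8, 256, 7, 7)  # 8 pooled regions
out = head(x)
assert out.shape == (8, 1024)
assert head.output_shape.channels == 1024
# --- end sketch ---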
detectron2/modeling/roi_heads/cascade_rcnn.py  0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import torch
from torch import nn
from torch.autograd.function import Function

from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage

from ..box_regression import Box2BoxTransform
from ..matcher import Matcher
from ..poolers import ROIPooler
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers, fast_rcnn_inference
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads


class _ScaleGradient(Function):
    @staticmethod
    def forward(ctx, input, scale):
        ctx.scale = scale
        return input

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output * ctx.scale, None


@ROI_HEADS_REGISTRY.register()
class CascadeROIHeads(StandardROIHeads):
    """
    Implement :paper:`Cascade R-CNN`.
    """

    @configurable
    def __init__(
        self,
        *,
        box_in_features: List[str],
        box_pooler: ROIPooler,
        box_heads: List[nn.Module],
        box_predictors: List[nn.Module],
        proposal_matchers: List[Matcher],
        **kwargs,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            box_pooler (ROIPooler): pooler that extracts region features from given boxes
            box_heads (list[nn.Module]): box head for each cascade stage
            box_predictors (list[nn.Module]): box predictor for each cascade stage
            proposal_matchers (list[Matcher]): matcher with different IoU thresholds to
                match boxes with ground truth for each stage. The first matcher matches
                RPN proposals with ground truth, the other matchers use boxes predicted
                by the previous stage as proposals and match them with ground truth.
        """
        assert "proposal_matcher" not in kwargs, (
            "CascadeROIHeads takes 'proposal_matchers=' for each stage instead "
            "of one 'proposal_matcher='."
        )
        # The first matcher matches RPN proposals with ground truth, done in the base class
        kwargs["proposal_matcher"] = proposal_matchers[0]
        num_stages = self.num_cascade_stages = len(box_heads)
        box_heads = nn.ModuleList(box_heads)
        box_predictors = nn.ModuleList(box_predictors)
        assert len(box_predictors) == num_stages, f"{len(box_predictors)} != {num_stages}!"
        assert len(proposal_matchers) == num_stages, f"{len(proposal_matchers)} != {num_stages}!"
        super().__init__(
            box_in_features=box_in_features,
            box_pooler=box_pooler,
            box_head=box_heads,
            box_predictor=box_predictors,
            **kwargs,
        )
        self.proposal_matchers = proposal_matchers

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        ret.pop("proposal_matcher")
        return ret

    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        # fmt: off
        in_features              = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution        = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales            = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio           = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type              = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
        cascade_ious             = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS
        assert len(cascade_bbox_reg_weights) == len(cascade_ious)
        assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \
            "CascadeROIHeads only support class-agnostic regression now!"
        assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0]
        # fmt: on

        in_channels = [input_shape[f].channels for f in in_features]
        # Check all channel counts are equal
        assert len(set(in_channels)) == 1, in_channels
        in_channels = in_channels[0]

        box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        pooled_shape = ShapeSpec(
            channels=in_channels, width=pooler_resolution, height=pooler_resolution
        )

        box_heads, box_predictors, proposal_matchers = [], [], []
        for match_iou, bbox_reg_weights in zip(cascade_ious, cascade_bbox_reg_weights):
            box_head = build_box_head(cfg, pooled_shape)
            box_heads.append(box_head)
            box_predictors.append(
                FastRCNNOutputLayers(
                    cfg,
                    box_head.output_shape,
                    box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),
                )
            )
            proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False))
        return {
            "box_in_features": in_features,
            "box_pooler": box_pooler,
            "box_heads": box_heads,
            "box_predictors": box_predictors,
            "proposal_matchers": proposal_matchers,
        }

    def forward(self, images, features, proposals, targets=None):
        del images
        if self.training:
            proposals = self.label_and_sample_proposals(proposals, targets)

        if self.training:
            # Need targets to box head
            losses = self._forward_box(features, proposals, targets)
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return proposals, losses
        else:
            pred_instances = self._forward_box(features, proposals)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}

    def _forward_box(self, features, proposals, targets=None):
        """
        Args:
            features, targets: the same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
                Each has fields "proposal_boxes", and "objectness_logits",
                "gt_classes", "gt_boxes".
        """
        features = [features[f] for f in self.box_in_features]
        head_outputs = []  # (predictor, predictions, proposals)
        prev_pred_boxes = None
        image_sizes = [x.image_size for x in proposals]
        for k in range(self.num_cascade_stages):
            if k > 0:
                # The output boxes of the previous stage are used to create the input
                # proposals of the next stage.
                proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes)
                if self.training:
                    proposals = self._match_and_label_boxes(proposals, k, targets)
            predictions = self._run_stage(features, proposals, k)
            prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals)
            head_outputs.append((self.box_predictor[k], predictions, proposals))

        if self.training:
            losses = {}
            storage = get_event_storage()
            for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
                with storage.name_scope("stage{}".format(stage)):
                    stage_losses = predictor.losses(predictions, proposals)
                losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()})
            return losses
        else:
            # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)
            scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]

            # Average the scores across heads
            scores = [
                sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
                for scores_per_image in zip(*scores_per_stage)
            ]
            # Use the boxes of the last head
            predictor, predictions, proposals = head_outputs[-1]
            boxes = predictor.predict_boxes(predictions, proposals)
            pred_instances, _ = fast_rcnn_inference(
                boxes,
                scores,
                image_sizes,
                predictor.test_score_thresh,
                predictor.test_nms_thresh,
                predictor.test_topk_per_image,
            )
            return pred_instances

    @torch.no_grad()
    def _match_and_label_boxes(self, proposals, stage, targets):
        """
        Match proposals with groundtruth using the matcher at the given stage.
        Label the proposals as foreground or background based on the match.

        Args:
            proposals (list[Instances]): One Instances for each image, with
                the field "proposal_boxes".
            stage (int): the current stage
            targets (list[Instances]): the ground truth instances

        Returns:
            list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes"
        """
        num_fg_samples, num_bg_samples = [], []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            match_quality_matrix = pairwise_iou(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            # proposal_labels are 0 or 1
            matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix)
            if len(targets_per_image) > 0:
                gt_classes = targets_per_image.gt_classes[matched_idxs]
                # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
                gt_classes[proposal_labels == 0] = self.num_classes
                gt_boxes = targets_per_image.gt_boxes[matched_idxs]
            else:
                gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
                gt_boxes = Boxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4))
                )
            proposals_per_image.gt_classes = gt_classes
            proposals_per_image.gt_boxes = gt_boxes

            num_fg_samples.append((proposal_labels == 1).sum().item())
            num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1])

        # Log the number of fg/bg samples in each stage
        storage = get_event_storage()
        storage.put_scalar(
            "stage{}/roi_head/num_fg_samples".format(stage),
            sum(num_fg_samples) / len(num_fg_samples),
        )
        storage.put_scalar(
            "stage{}/roi_head/num_bg_samples".format(stage),
            sum(num_bg_samples) / len(num_bg_samples),
        )
        return proposals

    def _run_stage(self, features, proposals, stage):
        """
        Args:
            features (list[Tensor]): #lvl input features to ROIHeads
            proposals (list[Instances]): #image Instances, with the field "proposal_boxes"
            stage (int): the current stage

        Returns:
            Same output as `FastRCNNOutputLayers.forward()`.
        """
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        # The original implementation averages the losses among heads,
        # but scales up the parameter gradients of the heads.
        # This is equivalent to adding the losses among heads,
        # but scaling down the gradients on features.
        box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages)
        box_features = self.box_head[stage](box_features)
        return self.box_predictor[stage](box_features)

    def _create_proposals_from_boxes(self, boxes, image_sizes):
        """
        Args:
            boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4
            image_sizes (list[tuple]): list of image shapes in (h, w)

        Returns:
            list[Instances]: per-image proposals with the given boxes.
        """
        # Just like RPN, the proposals should not have gradients
        boxes = [Boxes(b.detach()) for b in boxes]
        proposals = []
        for boxes_per_image, image_size in zip(boxes, image_sizes):
            boxes_per_image.clip(image_size)
            if self.training:
                # do not filter empty boxes at inference time,
                # because the scores from each stage need to be aligned and added later
                boxes_per_image = boxes_per_image[boxes_per_image.nonempty()]
            prop = Instances(image_size)
            prop.proposal_boxes = boxes_per_image
            proposals.append(prop)
        return proposals
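The gradient-scaling trick used in `_run_stage` is easiest to see on a toy tensor: the forward pass of `_ScaleGradient` is an identity, while gradients flowing back into the shared pooled features are multiplied by 1/num_stages. A minimal sketch, assuming three cascade stages:

# --- illustrative sketch (not part of this file) ---
import torch

x = torch.ones(3, requires_grad=True)
y = _ScaleGradient.apply(x, 1.0 / 3)  # identity in the forward pass
y.sum().backward()
print(y)       # tensor([1., 1., 1.], grad_fn=...)
print(x.grad)  # tensor([0.3333, 0.3333, 0.3333]) -- gradient scaled by 1/3
# --- end sketch ---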
detectron2/modeling/roi_heads/fast_rcnn.py  0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import torch
from fvcore.nn import smooth_l1_loss
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import Linear, ShapeSpec, batched_nms, cat
from detectron2.modeling.box_regression import Box2BoxTransform, apply_deltas_broadcast
from detectron2.structures import Boxes, Instances
from detectron2.utils.events import get_event_storage

__all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers"]


logger = logging.getLogger(__name__)

"""
Shape shorthand in this module:

    N: number of images in the minibatch
    R: number of ROIs, combined over all images, in the minibatch
    Ri: number of ROIs in image i
    K: number of foreground classes. E.g., there are 80 foreground classes in COCO.

Naming convention:

    deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
    transform (see :class:`box_regression.Box2BoxTransform`).

    pred_class_logits: predicted class scores in [-inf, +inf]; use
        softmax(pred_class_logits) to estimate P(class).

    gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
        foreground object classes and K represents the background class.

    pred_proposal_deltas: predicted box2box transform deltas for transforming proposals
        to detection box predictions.

    gt_proposal_deltas: ground-truth box2box transform deltas
"""


def fast_rcnn_inference(boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image):
    """
    Call `fast_rcnn_inference_single_image` for all images.

    Args:
        boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
            boxes for each image. Element i has shape (Ri, K * 4) if doing
            class-specific regression, or (Ri, 4) if doing class-agnostic
            regression, where Ri is the number of predicted objects for image i.
            This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
        scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
            Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
            for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
        image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
        score_thresh (float): Only return detections with a confidence score exceeding this
            threshold.
        nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
        topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
            all detections.

    Returns:
        instances: (list[Instances]): A list of N instances, one for each image in the batch,
            that stores the topk most confident detections.
        kept_indices: (list[Tensor]): A list of 1D tensors of length N, where element i indicates
            the corresponding boxes/scores indices in [0, Ri) from the input, for image i.
    """
    result_per_image = [
        fast_rcnn_inference_single_image(
            boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
        )
        for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
    ]
    return [x[0] for x in result_per_image], [x[1] for x in result_per_image]


def fast_rcnn_inference_single_image(
    boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
):
    """
    Single-image inference. Return bounding-box detection results by thresholding
    on scores and applying non-maximum suppression (NMS).

    Args:
        Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
        per image.

    Returns:
        Same as `fast_rcnn_inference`, but for only one image.
    """
    valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
    if not valid_mask.all():
        boxes = boxes[valid_mask]
        scores = scores[valid_mask]

    scores = scores[:, :-1]
    num_bbox_reg_classes = boxes.shape[1] // 4
    # Convert to Boxes to use the `clip` function ...
    boxes = Boxes(boxes.reshape(-1, 4))
    boxes.clip(image_shape)
    boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4)  # R x C x 4

    # Filter results based on detection scores
    filter_mask = scores > score_thresh  # R x K
    # R' x 2. First column contains indices of the R predictions;
    # Second column contains indices of classes.
    filter_inds = filter_mask.nonzero()
    if num_bbox_reg_classes == 1:
        boxes = boxes[filter_inds[:, 0], 0]
    else:
        boxes = boxes[filter_mask]
    scores = scores[filter_mask]

    # Apply per-class NMS
    keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
    if topk_per_image >= 0:
        keep = keep[:topk_per_image]
    boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]

    result = Instances(image_shape)
    result.pred_boxes = Boxes(boxes)
    result.scores = scores
    result.pred_classes = filter_inds[:, 1]
    return result, filter_inds[:, 0]


class FastRCNNOutputs(object):
    """
    A class that stores information about outputs of a Fast R-CNN head.
    It provides methods that are used to decode the outputs of a Fast R-CNN head.
    """

    def __init__(
        self,
        box2box_transform,
        pred_class_logits,
        pred_proposal_deltas,
        proposals,
        smooth_l1_beta=0,
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
                box2box transform instance for proposal-to-detection transformations.
            pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
                logits for all R predicted object instances.
                Each row corresponds to a predicted object instance.
            pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
                class-specific or class-agnostic regression. It stores the predicted deltas that
                transform proposals into final box detections.
                B is the box dimension (4 or 5).
                When B is 4, each row is [dx, dy, dw, dh (, ....)].
                When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
            proposals (list[Instances]): A list of N Instances, where Instances i stores the
                proposals for image i, in the field "proposal_boxes".
                When training, each Instances must have ground-truth labels
                stored in the field "gt_classes" and "gt_boxes".
                The total number of all instances must be equal to R.
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
        """
        self.box2box_transform = box2box_transform
        self.num_preds_per_image = [len(p) for p in proposals]
        self.pred_class_logits = pred_class_logits
        self.pred_proposal_deltas = pred_proposal_deltas
        self.smooth_l1_beta = smooth_l1_beta
        self.image_shapes = [x.image_size for x in proposals]

        if len(proposals):
            box_type = type(proposals[0].proposal_boxes)
            # cat(..., dim=0) concatenates over all images in the batch
            self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
            assert (
                not self.proposals.tensor.requires_grad
            ), "Proposals should not require gradients!"

            # The following fields should exist only when training.
            if proposals[0].has("gt_boxes"):
                self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
                assert proposals[0].has("gt_classes")
                self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)
        else:
            self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))
        self._no_instances = len(proposals) == 0  # no instances found

    def _log_accuracy(self):
        """
        Log the accuracy metrics to EventStorage.
        """
        num_instances = self.gt_classes.numel()
        pred_classes = self.pred_class_logits.argmax(dim=1)
        bg_class_ind = self.pred_class_logits.shape[1] - 1

        fg_inds = (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind)
        num_fg = fg_inds.nonzero().numel()
        fg_gt_classes = self.gt_classes[fg_inds]
        fg_pred_classes = pred_classes[fg_inds]

        num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()
        num_accurate = (pred_classes == self.gt_classes).nonzero().numel()
        fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()

        storage = get_event_storage()
        if num_instances > 0:
            storage.put_scalar("fast_rcnn/cls_accuracy", num_accurate / num_instances)
            if num_fg > 0:
                storage.put_scalar("fast_rcnn/fg_cls_accuracy", fg_num_accurate / num_fg)
                storage.put_scalar("fast_rcnn/false_negative", num_false_negative / num_fg)

    def softmax_cross_entropy_loss(self):
        """
        Compute the softmax cross entropy loss for box classification.

        Returns:
            scalar Tensor
        """
        if self._no_instances:
            return 0.0 * self.pred_class_logits.sum()
        else:
            self._log_accuracy()
            return F.cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean")

    def smooth_l1_loss(self):
        """
        Compute the smooth L1 loss for box regression.

        Returns:
            scalar Tensor
        """
        if self._no_instances:
            return 0.0 * self.pred_proposal_deltas.sum()
        gt_proposal_deltas = self.box2box_transform.get_deltas(
            self.proposals.tensor, self.gt_boxes.tensor
        )
        box_dim = gt_proposal_deltas.size(1)  # 4 or 5
        cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim
        device = self.pred_proposal_deltas.device

        bg_class_ind = self.pred_class_logits.shape[1] - 1

        # Box delta loss is only computed between the prediction for the gt class k
        # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
        # for non-gt classes and background.
        # Empty fg_inds produces a valid loss of zero as long as the size_average
        # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally
        # and would produce a nan loss).
        fg_inds = torch.nonzero(
            (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind), as_tuple=True
        )[0]
        if cls_agnostic_bbox_reg:
            # pred_proposal_deltas only corresponds to foreground class for agnostic
            gt_class_cols = torch.arange(box_dim, device=device)
        else:
            fg_gt_classes = self.gt_classes[fg_inds]
            # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
            # where b is the dimension of box representation (4 or 5)
            # Note that compared to Detectron1,
            # we do not perform bounding box regression for background classes.
            gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(box_dim, device=device)

        loss_box_reg = smooth_l1_loss(
            self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
            gt_proposal_deltas[fg_inds],
            self.smooth_l1_beta,
            reduction="sum",
        )
        # The loss is normalized using the total number of regions (R), not the number
        # of foreground regions even though the box regression loss is only defined on
        # foreground regions. Why? Because doing so gives equal training influence to
        # each foreground example. To see how, consider two different minibatches:
        #  (1) Contains a single foreground region
        #  (2) Contains 100 foreground regions
        # If we normalize by the number of foreground regions, the single example in
        # minibatch (1) will be given 100 times as much influence as each foreground
        # example in minibatch (2). Normalizing by the total number of regions, R,
        # means that the single example in minibatch (1) and each of the 100 examples
        # in minibatch (2) are given equal influence.
        loss_box_reg = loss_box_reg / self.gt_classes.numel()
        return loss_box_reg

    def _predict_boxes(self):
        """
        Returns:
            Tensor: A Tensor of predicted class-specific or class-agnostic boxes
                for all images in a batch. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
                the number of predicted objects for image i and B is the box dimension (4 or 5)
        """
        return apply_deltas_broadcast(
            self.box2box_transform, self.pred_proposal_deltas, self.proposals.tensor
        )

    """
    A subclass is expected to have the following methods because
    they are used to query information about the head predictions.
    """

    def losses(self):
        """
        Compute the default losses for box head in Fast(er) R-CNN,
        with softmax cross entropy loss and smooth L1 loss.

        Returns:
            A dict of losses (scalar tensors) containing keys "loss_cls" and "loss_box_reg".
        """
        return {
            "loss_cls": self.softmax_cross_entropy_loss(),
            "loss_box_reg": self.smooth_l1_loss(),
        }

    def predict_boxes(self):
        """
        Deprecated
        """
        return self._predict_boxes().split(self.num_preds_per_image, dim=0)

    def predict_probs(self):
        """
        Deprecated
        """
        probs = F.softmax(self.pred_class_logits, dim=-1)
        return probs.split(self.num_preds_per_image, dim=0)

    def inference(self, score_thresh, nms_thresh, topk_per_image):
        """
        Deprecated
        """
        boxes = self.predict_boxes()
        scores = self.predict_probs()
        image_shapes = self.image_shapes
        return fast_rcnn_inference(
            boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
        )


class FastRCNNOutputLayers(nn.Module):
    """
    Two linear layers for predicting Fast R-CNN outputs:
      (1) proposal-to-detection box regression deltas
      (2) classification scores
    """

    @configurable
    def __init__(
        self,
        input_shape,
        *,
        box2box_transform,
        num_classes,
        cls_agnostic_bbox_reg=False,
        smooth_l1_beta=0.0,
        test_score_thresh=0.0,
        test_nms_thresh=0.5,
        test_topk_per_image=100,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            input_shape (ShapeSpec): shape of the input feature to this module
            box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
            num_classes (int): number of foreground classes
            cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
            smooth_l1_beta (float): transition point from L1 to L2 loss.
            test_score_thresh (float): threshold to filter predictions results.
            test_nms_thresh (float): NMS threshold for prediction results.
            test_topk_per_image (int): number of top predictions to produce per image.
        """
        super().__init__()
        if isinstance(input_shape, int):  # some backward compatibility
            input_shape = ShapeSpec(channels=input_shape)
        input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)
        # The prediction layer for num_classes foreground classes and one background class
        # (hence + 1)
        self.cls_score = Linear(input_size, num_classes + 1)
        num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
        box_dim = len(box2box_transform.weights)
        self.bbox_pred = Linear(input_size, num_bbox_reg_classes * box_dim)

        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for l in [self.cls_score, self.bbox_pred]:
            nn.init.constant_(l.bias, 0)

        self.box2box_transform = box2box_transform
        self.smooth_l1_beta = smooth_l1_beta
        self.test_score_thresh = test_score_thresh
        self.test_nms_thresh = test_nms_thresh
        self.test_topk_per_image = test_topk_per_image

    @classmethod
    def from_config(cls, cfg, input_shape):
        return {
            "input_shape": input_shape,
            "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
            # fmt: off
            "num_classes"           : cfg.MODEL.ROI_HEADS.NUM_CLASSES,
            "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
            "smooth_l1_beta"        : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
            "test_score_thresh"     : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
            "test_nms_thresh"       : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
            "test_topk_per_image"   : cfg.TEST.DETECTIONS_PER_IMAGE,
            # fmt: on
        }

    def forward(self, x):
        """
        Returns:
            Tensor: Nx(K+1) scores for each box
            Tensor: Nx4 or Nx(Kx4) bounding box regression deltas.
        """
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = self.cls_score(x)
        proposal_deltas = self.bbox_pred(x)
        return scores, proposal_deltas

    # TODO: move the implementation to this class.
    def losses(self, predictions, proposals):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features
                that were used to compute predictions.
        """
        scores, proposal_deltas = predictions
        return FastRCNNOutputs(
            self.box2box_transform, scores, proposal_deltas, proposals, self.smooth_l1_beta
        ).losses()

    def inference(self, predictions, proposals):
        """
        Returns:
            list[Instances]: same as `fast_rcnn_inference`.
            list[Tensor]: same as `fast_rcnn_inference`.
        """
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        image_shapes = [x.image_size for x in proposals]
        return fast_rcnn_inference(
            boxes,
            scores,
            image_shapes,
            self.test_score_thresh,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )

    def predict_boxes_for_gt_classes(self, predictions, proposals):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of
                class-specific box head. Element i of the list has shape (Ri, B), where Ri is
                the number of predicted objects for image i and B is the box dimension (4 or 5)
        """
        if not len(proposals):
            return []
        scores, proposal_deltas = predictions
        proposal_boxes = [p.proposal_boxes for p in proposals]
        proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
        N, B = proposal_boxes.shape
        predict_boxes = apply_deltas_broadcast(
            self.box2box_transform, proposal_deltas, proposal_boxes
        )  # Nx(KxB)

        K = predict_boxes.shape[1] // B
        if K > 1:
            gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
            # Some proposals are ignored or have a background class. Their gt_classes
            # cannot be used as index.
            gt_classes = gt_classes.clamp_(0, K - 1)

            predict_boxes = predict_boxes.view(N, K, B)[
                torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes
            ]
        num_prop_per_image = [len(p) for p in proposals]
        return predict_boxes.split(num_prop_per_image)

    def predict_boxes(self, predictions, proposals):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
                for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
                the number of predicted objects for image i and B is the box dimension (4 or 5)
        """
        if not len(proposals):
            return []
        _, proposal_deltas = predictions
        num_prop_per_image = [len(p) for p in proposals]
        proposal_boxes = [p.proposal_boxes for p in proposals]
        proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
predict_boxes
=
apply_deltas_broadcast
(
self
.
box2box_transform
,
proposal_deltas
,
proposal_boxes
)
# Nx(KxB)
return
predict_boxes
.
split
(
num_prop_per_image
)
def
predict_probs
(
self
,
predictions
,
proposals
):
"""
Returns:
list[Tensor]: A list of Tensors of predicted class probabilities for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i.
"""
scores
,
_
=
predictions
num_inst_per_image
=
[
len
(
p
)
for
p
in
proposals
]
probs
=
F
.
softmax
(
scores
,
dim
=-
1
)
return
probs
.
split
(
num_inst_per_image
,
dim
=
0
)
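# --- Illustrative usage sketch (not part of this file) ---
# A minimal, hedged check of the FastRCNNOutputLayers interface defined above, built
# directly instead of from a config. The channel count (1024) and class count (80) are
# arbitrary example values.
import torch
from detectron2.layers import ShapeSpec
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers

layers = FastRCNNOutputLayers(
    ShapeSpec(channels=1024, height=1, width=1),
    box2box_transform=Box2BoxTransform(weights=(10.0, 10.0, 5.0, 5.0)),
    num_classes=80,
)
feats = torch.randn(16, 1024)   # 16 pooled region features
scores, deltas = layers(feats)
print(scores.shape)             # (16, 81): 80 foreground classes + 1 background class
print(deltas.shape)             # (16, 320): 4 regression deltas per foreground class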
detectron2/modeling/roi_heads/keypoint_head.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate
from detectron2.structures import Instances, heatmaps_to_keypoints
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

_TOTAL_SKIPPED = 0

ROI_KEYPOINT_HEAD_REGISTRY = Registry("ROI_KEYPOINT_HEAD")
ROI_KEYPOINT_HEAD_REGISTRY.__doc__ = """
Registry for keypoint heads, which make keypoint predictions from per-region features.

The registered object will be called with `obj(cfg, input_shape)`.
"""


def build_keypoint_head(cfg, input_shape):
    """
    Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`.
    """
    name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME
    return ROI_KEYPOINT_HEAD_REGISTRY.get(name)(cfg, input_shape)


def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer):
    """
    Arguments:
        pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number
            of instances in the batch, K is the number of keypoints, and S is the side length
            of the keypoint heatmap. The values are spatial logits.
        instances (list[Instances]): A list of M Instances, where M is the batch size.
            These instances are predictions from the model
            that are in 1:1 correspondence with pred_keypoint_logits.
            Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint`
            instance.
        normalizer (float): Normalize the loss by this amount.
            If not specified, we normalize by the number of visible keypoints in the minibatch.

    Returns a scalar tensor containing the loss.
    """
    heatmaps = []
    valid = []

    keypoint_side_len = pred_keypoint_logits.shape[2]
    for instances_per_image in instances:
        if len(instances_per_image) == 0:
            continue
        keypoints = instances_per_image.gt_keypoints
        heatmaps_per_image, valid_per_image = keypoints.to_heatmap(
            instances_per_image.proposal_boxes.tensor, keypoint_side_len
        )
        heatmaps.append(heatmaps_per_image.view(-1))
        valid.append(valid_per_image.view(-1))

    if len(heatmaps):
        keypoint_targets = cat(heatmaps, dim=0)
        valid = cat(valid, dim=0).to(dtype=torch.uint8)
        valid = torch.nonzero(valid).squeeze(1)

    # torch.mean (in binary_cross_entropy_with_logits) doesn't
    # accept empty tensors, so handle it separately
    if len(heatmaps) == 0 or valid.numel() == 0:
        global _TOTAL_SKIPPED
        _TOTAL_SKIPPED += 1
        storage = get_event_storage()
        storage.put_scalar("kpts_num_skipped_batches", _TOTAL_SKIPPED, smoothing_hint=False)
        return pred_keypoint_logits.sum() * 0

    N, K, H, W = pred_keypoint_logits.shape
    pred_keypoint_logits = pred_keypoint_logits.view(N * K, H * W)

    keypoint_loss = F.cross_entropy(
        pred_keypoint_logits[valid], keypoint_targets[valid], reduction="sum"
    )

    # If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch
    if normalizer is None:
        normalizer = valid.numel()
    keypoint_loss /= normalizer

    return keypoint_loss


def keypoint_rcnn_inference(pred_keypoint_logits, pred_instances):
    """
    Post process each predicted keypoint heatmap in `pred_keypoint_logits` into (x, y, score)
        and add it to the `pred_instances` as a `pred_keypoints` field.

    Args:
        pred_keypoint_logits (Tensor): A tensor of shape (R, K, S, S) where R is the total number
            of instances in the batch, K is the number of keypoints, and S is the side length of
            the keypoint heatmap. The values are spatial logits.
        pred_instances (list[Instances]): A list of N Instances, where N is the number of images.

    Returns:
        None. Each element in pred_instances will contain an extra "pred_keypoints" field.
            The field is a tensor of shape (#instance, K, 3) where the last
            dimension corresponds to (x, y, score).
            The scores are larger than 0.
    """
    # flatten all bboxes from all images together (list[Boxes] -> Rx4 tensor)
    bboxes_flat = cat([b.pred_boxes.tensor for b in pred_instances], dim=0)

    keypoint_results = heatmaps_to_keypoints(pred_keypoint_logits.detach(), bboxes_flat.detach())
    num_instances_per_image = [len(i) for i in pred_instances]
    keypoint_results = keypoint_results[:, :, [0, 1, 3]].split(num_instances_per_image, dim=0)

    for keypoint_results_per_image, instances_per_image in zip(keypoint_results, pred_instances):
        # keypoint_results_per_image is (num instances)x(num keypoints)x(x, y, score)
        instances_per_image.pred_keypoints = keypoint_results_per_image
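# --- Illustrative shape sketch (not part of this file) ---
# A minimal, hedged check of the heatmap -> (x, y, score) conversion that
# keypoint_rcnn_inference above relies on. The sizes (R=2 instances, K=17 keypoints,
# S=56 heatmap side) are arbitrary example values.
import torch
from detectron2.structures import heatmaps_to_keypoints

logits = torch.randn(2, 17, 56, 56)                  # (R, K, S, S) spatial logits
boxes = torch.tensor([[10.0, 10.0, 110.0, 90.0],
                      [30.0, 40.0, 80.0, 120.0]])    # (R, 4) XYXY boxes
results = heatmaps_to_keypoints(logits, boxes)
print(results.shape)  # (2, 17, 4): (x, y, logit value, score) per keypoint
# keypoint_rcnn_inference keeps columns [0, 1, 3], i.e. (x, y, score), per instance.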
class BaseKeypointRCNNHead(nn.Module):
    """
    Implement the basic Keypoint R-CNN losses and inference logic described in :paper:`Mask R-CNN`.
    """

    @configurable
    def __init__(self, *, num_keypoints, loss_weight=1.0, loss_normalizer=1.0):
        """
        NOTE: this interface is experimental.

        Args:
            num_keypoints (int): number of keypoints to predict
            loss_weight (float): weight to multiply on the keypoint loss
            loss_normalizer (float or str):
                If float, divide the loss by `loss_normalizer * #images`.
                If 'visible', the loss is normalized by the total number of
                visible keypoints across images.
        """
        super().__init__()
        self.num_keypoints = num_keypoints
        self.loss_weight = loss_weight
        assert loss_normalizer == "visible" or isinstance(loss_normalizer, float), loss_normalizer
        self.loss_normalizer = loss_normalizer

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = {
            "loss_weight": cfg.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT,
            "num_keypoints": cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
        }
        normalize_by_visible = (
            cfg.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS
        )  # noqa
        if not normalize_by_visible:
            batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
            positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
            ret["loss_normalizer"] = (
                ret["num_keypoints"] * batch_size_per_image * positive_sample_fraction
            )
        else:
            ret["loss_normalizer"] = "visible"
        return ret

    def forward(self, x, instances: List[Instances]):
        """
        Args:
            x: input region feature(s) provided by :class:`ROIHeads`.
            instances (list[Instances]): contains the boxes & labels corresponding
                to the input features.
                Exact format is up to its caller to decide.
                Typically, this is the foreground instances in training, with
                "proposal_boxes" field and other gt annotations.
                In inference, it contains boxes that are already predicted.

        Returns:
            A dict of losses if in training. The predicted "instances" if in inference.
        """
        x = self.layers(x)
        if self.training:
            num_images = len(instances)
            normalizer = (
                None if self.loss_normalizer == "visible" else num_images * self.loss_normalizer
            )
            return {
                "loss_keypoint": keypoint_rcnn_loss(x, instances, normalizer=normalizer)
                * self.loss_weight
            }
        else:
            keypoint_rcnn_inference(x, instances)
            return instances

    def layers(self, x):
        """
        Neural network layers that make predictions from regional input features.
        """
        raise NotImplementedError


@ROI_KEYPOINT_HEAD_REGISTRY.register()
class KRCNNConvDeconvUpsampleHead(BaseKeypointRCNNHead):
    """
    A standard keypoint head containing a series of 3x3 convs, followed by
    a transpose convolution and bilinear interpolation for upsampling.
    """

    @configurable
    def __init__(self, input_shape, *, num_keypoints, conv_dims, **kwargs):
        """
        NOTE: this interface is experimental.

        Args:
            input_shape (ShapeSpec): shape of the input feature
            conv_dims: an iterable of output channel counts for each conv in the head
                e.g. (512, 512, 512) for three convs outputting 512 channels.
        """
        super().__init__(num_keypoints=num_keypoints, **kwargs)

        # default up_scale to 2 (this can be made an option)
        up_scale = 2
        in_channels = input_shape.channels

        self.blocks = []
        for idx, layer_channels in enumerate(conv_dims, 1):
            module = Conv2d(in_channels, layer_channels, 3, stride=1, padding=1)
            self.add_module("conv_fcn{}".format(idx), module)
            self.blocks.append(module)
            in_channels = layer_channels

        deconv_kernel = 4
        self.score_lowres = ConvTranspose2d(
            in_channels, num_keypoints, deconv_kernel, stride=2, padding=deconv_kernel // 2 - 1
        )
        self.up_scale = up_scale

        for name, param in self.named_parameters():
            if "bias" in name:
                nn.init.constant_(param, 0)
            elif "weight" in name:
                # Caffe2 implementation uses MSRAFill, which in fact
                # corresponds to kaiming_normal_ in PyTorch
                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        ret["input_shape"] = input_shape
        ret["conv_dims"] = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS
        return ret

    def layers(self, x):
        for layer in self.blocks:
            x = F.relu(layer(x))
        x = self.score_lowres(x)
        x = interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False)
        return x
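# --- Illustrative sketch (not part of this file) ---
# A minimal, hedged example of the head defined above, built directly instead of from a
# config. Channel and keypoint counts are arbitrary; the point is the 4x spatial
# upsampling (2x from the transposed conv, 2x from the bilinear interpolation).
import torch
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads.keypoint_head import KRCNNConvDeconvUpsampleHead

head = KRCNNConvDeconvUpsampleHead(
    ShapeSpec(channels=256, height=14, width=14),
    num_keypoints=17,
    conv_dims=(512, 512),
)
x = torch.randn(3, 256, 14, 14)   # 3 pooled regions
out = head.layers(x)
print(out.shape)                  # (3, 17, 56, 56): one 56x56 heatmap per keypoint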
detectron2/modeling/roi_heads/mask_head.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm
from detectron2.structures import Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD")
ROI_MASK_HEAD_REGISTRY.__doc__ = """
Registry for mask heads, which predict instance masks given
per-region features.

The registered object will be called with `obj(cfg, input_shape)`.
"""


def mask_rcnn_loss(pred_mask_logits, instances, vis_period=0):
    """
    Compute the mask prediction loss defined in the Mask R-CNN paper.

    Args:
        pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask)
            for class-specific or class-agnostic, where B is the total number of predicted masks
            in all images, C is the number of foreground classes, and Hmask, Wmask are the height
            and width of the mask predictions. The values are logits.
        instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. These instances are in 1:1
            correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask,
            ...) associated with each instance are stored in fields.
        vis_period (int): the period (in steps) to dump visualization.

    Returns:
        mask_loss (Tensor): A scalar tensor containing the loss.
    """
    cls_agnostic_mask = pred_mask_logits.size(1) == 1
    total_num_masks = pred_mask_logits.size(0)
    mask_side_len = pred_mask_logits.size(2)
    assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!"

    gt_classes = []
    gt_masks = []
    for instances_per_image in instances:
        if len(instances_per_image) == 0:
            continue
        if not cls_agnostic_mask:
            gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64)
            gt_classes.append(gt_classes_per_image)

        gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize(
            instances_per_image.proposal_boxes.tensor, mask_side_len
        ).to(device=pred_mask_logits.device)
        # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len
        gt_masks.append(gt_masks_per_image)

    if len(gt_masks) == 0:
        return pred_mask_logits.sum() * 0

    gt_masks = cat(gt_masks, dim=0)

    if cls_agnostic_mask:
        pred_mask_logits = pred_mask_logits[:, 0]
    else:
        indices = torch.arange(total_num_masks)
        gt_classes = cat(gt_classes, dim=0)
        pred_mask_logits = pred_mask_logits[indices, gt_classes]

    if gt_masks.dtype == torch.bool:
        gt_masks_bool = gt_masks
    else:
        # Here we allow gt_masks to be float as well (depend on the implementation of rasterize())
        gt_masks_bool = gt_masks > 0.5
    gt_masks = gt_masks.to(dtype=torch.float32)

    # Log the training accuracy (using gt classes and 0.5 threshold)
    mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool
    mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0))
    num_positive = gt_masks_bool.sum().item()
    false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max(
        gt_masks_bool.numel() - num_positive, 1.0
    )
    false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0)

    storage = get_event_storage()
    storage.put_scalar("mask_rcnn/accuracy", mask_accuracy)
    storage.put_scalar("mask_rcnn/false_positive", false_positive)
    storage.put_scalar("mask_rcnn/false_negative", false_negative)
    if vis_period > 0 and storage.iter % vis_period == 0:
        pred_masks = pred_mask_logits.sigmoid()
        vis_masks = torch.cat([pred_masks, gt_masks], axis=2)
        name = "Left: mask prediction;   Right: mask GT"
        for idx, vis_mask in enumerate(vis_masks):
            vis_mask = torch.stack([vis_mask] * 3, axis=0)
            storage.put_image(name + f" ({idx})", vis_mask)

    mask_loss = F.binary_cross_entropy_with_logits(pred_mask_logits, gt_masks, reduction="mean")
    return mask_loss


def mask_rcnn_inference(pred_mask_logits, pred_instances):
    """
    Convert pred_mask_logits to estimated foreground probability masks while also
    extracting only the masks for the predicted classes in pred_instances. For each
    predicted box, the mask of the same class is attached to the instance by adding a
    new "pred_masks" field to pred_instances.

    Args:
        pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask)
            for class-specific or class-agnostic, where B is the total number of predicted masks
            in all images, C is the number of foreground classes, and Hmask, Wmask are the height
            and width of the mask predictions. The values are logits.
        pred_instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. Each Instances must have field "pred_classes".

    Returns:
        None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask,
            Wmask) for the predicted class. Note that the masks are returned as soft (non-quantized)
            masks at the resolution predicted by the network; post-processing steps, such as resizing
            the predicted masks to the original image resolution and/or binarizing them, is left
            to the caller.
    """
    cls_agnostic_mask = pred_mask_logits.size(1) == 1

    if cls_agnostic_mask:
        mask_probs_pred = pred_mask_logits.sigmoid()
    else:
        # Select masks corresponding to the predicted classes
        num_masks = pred_mask_logits.shape[0]
        class_pred = cat([i.pred_classes for i in pred_instances])
        indices = torch.arange(num_masks, device=class_pred.device)
        mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid()
    # mask_probs_pred.shape: (B, 1, Hmask, Wmask)

    num_boxes_per_image = [len(i) for i in pred_instances]
    mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0)

    for prob, instances in zip(mask_probs_pred, pred_instances):
        instances.pred_masks = prob  # (1, Hmask, Wmask)
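# --- Illustrative sketch (not part of this file) ---
# A small, self-contained illustration of the per-class gather performed in
# mask_rcnn_inference above: for each predicted box, keep only the mask channel of its
# predicted class before applying the sigmoid. Sizes are arbitrary example values.
import torch

pred_mask_logits = torch.randn(4, 80, 28, 28)   # (B, C, Hmask, Wmask), class-specific masks
class_pred = torch.tensor([3, 17, 3, 56])       # predicted class per box
indices = torch.arange(4)
mask_probs = pred_mask_logits[indices, class_pred][:, None].sigmoid()
print(mask_probs.shape)                         # (4, 1, 28, 28)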
class BaseMaskRCNNHead(nn.Module):
    """
    Implement the basic Mask R-CNN losses and inference logic described in :paper:`Mask R-CNN`
    """

    @configurable
    def __init__(self, *, vis_period=0):
        """
        NOTE: this interface is experimental.

        Args:
            vis_period (int): visualization period
        """
        super().__init__()
        self.vis_period = vis_period

    @classmethod
    def from_config(cls, cfg, input_shape):
        return {"vis_period": cfg.VIS_PERIOD}

    def forward(self, x, instances: List[Instances]):
        """
        Args:
            x: input region feature(s) provided by :class:`ROIHeads`.
            instances (list[Instances]): contains the boxes & labels corresponding
                to the input features.
                Exact format is up to its caller to decide.
                Typically, this is the foreground instances in training, with
                "proposal_boxes" field and other gt annotations.
                In inference, it contains boxes that are already predicted.

        Returns:
            A dict of losses in training. The predicted "instances" in inference.
        """
        x = self.layers(x)
        if self.training:
            return {"loss_mask": mask_rcnn_loss(x, instances, self.vis_period)}
        else:
            mask_rcnn_inference(x, instances)
            return instances

    def layers(self, x):
        """
        Neural network layers that make predictions from input features.
        """
        raise NotImplementedError


@ROI_MASK_HEAD_REGISTRY.register()
class MaskRCNNConvUpsampleHead(BaseMaskRCNNHead):
    """
    A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`).
    Predictions are made with a final 1x1 conv layer.
    """

    @configurable
    def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm="", **kwargs):
        """
        NOTE: this interface is experimental.

        Args:
            input_shape (ShapeSpec): shape of the input feature
            num_classes (int): the number of classes. 1 if using class agnostic prediction.
            conv_dims (list[int]): a list of N>0 integers representing the output dimensions
                of N-1 conv layers and the last upsample layer.
            conv_norm (str or callable): normalization for the conv layers.
                See :func:`detectron2.layers.get_norm` for supported types.
        """
        super().__init__(**kwargs)
        assert len(conv_dims) >= 1, "conv_dims have to be non-empty!"

        self.conv_norm_relus = []

        cur_channels = input_shape.channels
        for k, conv_dim in enumerate(conv_dims[:-1]):
            conv = Conv2d(
                cur_channels,
                conv_dim,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=not conv_norm,
                norm=get_norm(conv_norm, conv_dim),
                activation=F.relu,
            )
            self.add_module("mask_fcn{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            cur_channels = conv_dim

        self.deconv = ConvTranspose2d(
            cur_channels, conv_dims[-1], kernel_size=2, stride=2, padding=0
        )
        cur_channels = conv_dims[-1]

        self.predictor = Conv2d(cur_channels, num_classes, kernel_size=1, stride=1, padding=0)

        for layer in self.conv_norm_relus + [self.deconv]:
            weight_init.c2_msra_fill(layer)
        # use normal distribution initialization for mask prediction layer
        nn.init.normal_(self.predictor.weight, std=0.001)
        if self.predictor.bias is not None:
            nn.init.constant_(self.predictor.bias, 0)

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM
        num_conv = cfg.MODEL.ROI_MASK_HEAD.NUM_CONV
        ret.update(
            conv_dims=[conv_dim] * (num_conv + 1),  # +1 for ConvTranspose
            conv_norm=cfg.MODEL.ROI_MASK_HEAD.NORM,
            input_shape=input_shape,
        )
        if cfg.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK:
            ret["num_classes"] = 1
        else:
            ret["num_classes"] = cfg.MODEL.ROI_HEADS.NUM_CLASSES
        return ret

    def layers(self, x):
        for layer in self.conv_norm_relus:
            x = layer(x)
        x = F.relu(self.deconv(x))
        return self.predictor(x)


def build_mask_head(cfg, input_shape):
    """
    Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`.
    """
    name = cfg.MODEL.ROI_MASK_HEAD.NAME
    return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape)
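# --- Illustrative sketch (not part of this file) ---
# A minimal, hedged example of the mask head defined above, constructed directly rather
# than from a config. conv_dims of length 5 means four 3x3 convs followed by the 2x
# transposed-conv upsample; all sizes here are arbitrary example values.
import torch
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads.mask_head import MaskRCNNConvUpsampleHead

head = MaskRCNNConvUpsampleHead(
    ShapeSpec(channels=256, height=14, width=14),
    num_classes=80,
    conv_dims=[256, 256, 256, 256, 256],
)
x = torch.randn(2, 256, 14, 14)   # 2 pooled regions
out = head.layers(x)
print(out.shape)                  # (2, 80, 28, 28): one 28x28 mask logit map per class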
detectron2/modeling/roi_heads/roi_heads.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import inspect
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import nn

from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

from ..backbone.resnet import BottleneckBlock, make_stage
from ..matcher import Matcher
from ..poolers import ROIPooler
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
from ..sampling import subsample_labels
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers
from .keypoint_head import build_keypoint_head
from .mask_head import build_mask_head

ROI_HEADS_REGISTRY = Registry("ROI_HEADS")
ROI_HEADS_REGISTRY.__doc__ = """
Registry for ROI heads in a generalized R-CNN model.
ROIHeads take feature maps and region proposals, and
perform per-region computation.

The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`ROIHeads`.
"""

logger = logging.getLogger(__name__)


def build_roi_heads(cfg, input_shape):
    """
    Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
    """
    name = cfg.MODEL.ROI_HEADS.NAME
    return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)


def select_foreground_proposals(
    proposals: List[Instances], bg_label: int
) -> Tuple[List[Instances], List[torch.Tensor]]:
    """
    Given a list of N Instances (for N images), each containing a `gt_classes` field,
    return a list of Instances that contain only instances with `gt_classes != -1 &&
    gt_classes != bg_label`.

    Args:
        proposals (list[Instances]): A list of N Instances, where N is the number of
            images in the batch.
        bg_label: label index of background class.

    Returns:
        list[Instances]: N Instances, each contains only the selected foreground instances.
        list[Tensor]: N boolean vector, correspond to the selection mask of
            each Instances object. True for selected instances.
    """
    assert isinstance(proposals, (list, tuple))
    assert isinstance(proposals[0], Instances)
    assert proposals[0].has("gt_classes")
    fg_proposals = []
    fg_selection_masks = []
    for proposals_per_image in proposals:
        gt_classes = proposals_per_image.gt_classes
        fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label)
        fg_idxs = fg_selection_mask.nonzero().squeeze(1)
        fg_proposals.append(proposals_per_image[fg_idxs])
        fg_selection_masks.append(fg_selection_mask)
    return fg_proposals, fg_selection_masks


def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]:
    """
    Args:
        proposals (list[Instances]): a list of N Instances, where N is the
            number of images.

    Returns:
        proposals: only contains proposals with at least one visible keypoint.

    Note that this is still slightly different from Detectron.
    In Detectron, proposals for training keypoint head are re-sampled from
    all the proposals with IOU>threshold & >=1 visible keypoint.

    Here, the proposals are first sampled from all proposals with
    IOU>threshold, then proposals with no visible keypoint are filtered out.
    This strategy seems to make no difference on Detectron and is easier to implement.
    """
    ret = []
    all_num_fg = []
    for proposals_per_image in proposals:
        # If empty/unannotated image (hard negatives), skip filtering for train
        if len(proposals_per_image) == 0:
            ret.append(proposals_per_image)
            continue
        gt_keypoints = proposals_per_image.gt_keypoints.tensor
        # #fg x K x 3
        vis_mask = gt_keypoints[:, :, 2] >= 1
        xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1]
        proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1)  # #fg x 1 x 4
        kp_in_box = (
            (xs >= proposal_boxes[:, :, 0])
            & (xs <= proposal_boxes[:, :, 2])
            & (ys >= proposal_boxes[:, :, 1])
            & (ys <= proposal_boxes[:, :, 3])
        )
        selection = (kp_in_box & vis_mask).any(dim=1)
        selection_idxs = torch.nonzero(selection, as_tuple=True)[0]
        all_num_fg.append(selection_idxs.numel())
        ret.append(proposals_per_image[selection_idxs])

    storage = get_event_storage()
    storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg))
    return ret


class ROIHeads(torch.nn.Module):
    """
    ROIHeads perform all per-region computation in an R-CNN.

    It typically contains logic to

    1. (in training only) match proposals with ground truth and sample them
    2. crop the regions and extract per-region features using proposals
    3. make per-region predictions with different heads

    It can have many variants, implemented as subclasses of this class.
    This base class contains the logic to match/sample proposals.
    But it is not necessary to inherit this class if the sampling logic is not needed.
    """

    @configurable
    def __init__(
        self,
        *,
        num_classes,
        batch_size_per_image,
        positive_sample_fraction,
        proposal_matcher,
        proposal_append_gt=True
    ):
        """
        NOTE: this interface is experimental.

        Args:
            num_classes (int): number of classes. Used to label background proposals.
            batch_size_per_image (int): number of proposals to use for training
            positive_sample_fraction (float): fraction of positive (foreground) proposals
                to use for training.
            proposal_matcher (Matcher): matcher that matches proposals and ground truth
            proposal_append_gt (bool): whether to include ground truth as proposals as well
        """
        super().__init__()
        self.batch_size_per_image = batch_size_per_image
        self.positive_sample_fraction = positive_sample_fraction
        self.num_classes = num_classes
        self.proposal_matcher = proposal_matcher
        self.proposal_append_gt = proposal_append_gt

    @classmethod
    def from_config(cls, cfg):
        return {
            "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE,
            "positive_sample_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION,
            "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES,
            "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT,
            # Matcher to assign box proposals to gt boxes
            "proposal_matcher": Matcher(
                cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
                cfg.MODEL.ROI_HEADS.IOU_LABELS,
                allow_low_quality_matches=False,
            ),
        }

    def _sample_proposals(
        self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Based on the matching between N proposals and M groundtruth,
        sample the proposals and set their classification labels.

        Args:
            matched_idxs (Tensor): a vector of length N, each is the best-matched
                gt index in [0, M) for each proposal.
            matched_labels (Tensor): a vector of length N, the matcher's label
                (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
            gt_classes (Tensor): a vector of length M.

        Returns:
            Tensor: a vector of indices of sampled proposals. Each is in [0, N).
            Tensor: a vector of the same length, the classification label for
                each sampled proposal. Each sample is labeled as either a category in
                [0, num_classes) or the background (num_classes).
        """
        has_gt = gt_classes.numel() > 0
        # Get the corresponding GT for each proposal
        if has_gt:
            gt_classes = gt_classes[matched_idxs]
            # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
            gt_classes[matched_labels == 0] = self.num_classes
            # Label ignore proposals (-1 label)
            gt_classes[matched_labels == -1] = -1
        else:
            gt_classes = torch.zeros_like(matched_idxs) + self.num_classes

        sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
            gt_classes, self.batch_size_per_image, self.positive_sample_fraction, self.num_classes
        )

        sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
        return sampled_idxs, gt_classes[sampled_idxs]

    @torch.no_grad()
    def label_and_sample_proposals(
        self, proposals: List[Instances], targets: List[Instances]
    ) -> List[Instances]:
        """
        Prepare some proposals to be used to train the ROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
        boxes, with a fraction of positives that is no larger than
        ``self.positive_sample_fraction``.

        Args:
            See :meth:`ROIHeads.forward`

        Returns:
            list[Instances]:
                length `N` list of `Instances`s containing the proposals
                sampled for training. Each `Instances` has the following fields:

                - proposal_boxes: the proposal boxes
                - gt_boxes: the ground-truth box that the proposal is assigned to
                  (this is only meaningful if the proposal has a label > 0; if label = 0
                  then the ground-truth box is random)

                Other fields such as "gt_classes" and "gt_masks" that are included in `targets`.
        """
        gt_boxes = [x.gt_boxes for x in targets]
        # Augment proposals with ground-truth boxes.
        # In the case of learned proposals (e.g., RPN), when training starts
        # the proposals will be low quality due to random initialization.
        # It's possible that none of these initial
        # proposals have high enough overlap with the gt objects to be used
        # as positive examples for the second stage components (box head,
        # cls head, mask head). Adding the gt boxes to the set of proposals
        # ensures that the second stage components will have some positive
        # examples from the start of training. For RPN, this augmentation improves
        # convergence and empirically improves box AP on COCO by about 0.5
        # points (under one tested configuration).
        if self.proposal_append_gt:
            proposals = add_ground_truth_to_proposals(gt_boxes, proposals)

        proposals_with_gt = []

        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            match_quality_matrix = pairwise_iou(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            sampled_idxs, gt_classes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes
            )

            # Set target attributes of the sampled proposals:
            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes

            # We index all the attributes of targets that start with "gt_"
            # and have not been added to proposals yet (="gt_classes").
            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                # NOTE: here the indexing wastes some compute, because heads
                # like masks, keypoints, etc, will filter the proposals again,
                # (by foreground/background, or number of keypoints in the image, etc)
                # so we essentially index the data twice.
                for (trg_name, trg_value) in targets_per_image.get_fields().items():
                    if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
                        proposals_per_image.set(trg_name, trg_value[sampled_targets])
            else:
                gt_boxes = Boxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
                )
                proposals_per_image.gt_boxes = gt_boxes

            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)

        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))

        return proposals_with_gt

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
    ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
        """
        Args:
            images (ImageList):
            features (dict[str,Tensor]): input data as a mapping from feature
                map name to tensor. Axis 0 represents the number of images `N` in
                the input data; axes 1-3 are channels, height, and width, which may
                vary between feature maps (e.g., if a feature pyramid is used).
            proposals (list[Instances]): length `N` list of `Instances`. The i-th
                `Instances` contains object proposals for the i-th input image,
                with fields "proposal_boxes" and "objectness_logits".
            targets (list[Instances], optional): length `N` list of `Instances`. The i-th
                `Instances` contains the ground-truth per-instance annotations
                for the i-th input image. Specify `targets` during training only.
                It may have the following fields:

                - gt_boxes: the bounding box of each instance.
                - gt_classes: the label for each instance with a category ranging in [0, #class].
                - gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance.
                - gt_keypoints: NxKx3, the ground-truth keypoints for each instance.

        Returns:
            list[Instances]: length `N` list of `Instances` containing the
            detected instances. Returned during inference only; may be [] during training.

            dict[str->Tensor]:
            mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        raise NotImplementedError()


@ROI_HEADS_REGISTRY.register()
class Res5ROIHeads(ROIHeads):
    """
    The ROIHeads in a typical "C4" R-CNN model, where
    the box and mask head share the cropping and
    the per-region feature computation by a Res5 block.
    """

    def __init__(self, cfg, input_shape):
        super().__init__(cfg)

        # fmt: off
        self.in_features  = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_type       = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        pooler_scales     = (1.0 / input_shape[self.in_features[0]].stride, )
        sampling_ratio    = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        self.mask_on      = cfg.MODEL.MASK_ON
        # fmt: on
        assert not cfg.MODEL.KEYPOINT_ON
        assert len(self.in_features) == 1

        self.pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )

        self.res5, out_channels = self._build_res5_block(cfg)
        self.box_predictor = FastRCNNOutputLayers(
            cfg, ShapeSpec(channels=out_channels, height=1, width=1)
        )

        if self.mask_on:
            self.mask_head = build_mask_head(
                cfg,
                ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution),
            )

    def _build_res5_block(self, cfg):
        # fmt: off
        stage_channel_factor = 2 ** 3  # res5 is 8x res2
        num_groups           = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group      = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        bottleneck_channels  = num_groups * width_per_group * stage_channel_factor
        out_channels         = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
        stride_in_1x1        = cfg.MODEL.RESNETS.STRIDE_IN_1X1
        norm                 = cfg.MODEL.RESNETS.NORM
        assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
            "Deformable conv is not yet supported in res5 head."
        # fmt: on

        blocks = make_stage(
            BottleneckBlock,
            3,
            first_stride=2,
            in_channels=out_channels // 2,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            norm=norm,
            stride_in_1x1=stride_in_1x1,
        )
        return nn.Sequential(*blocks), out_channels

    def _shared_roi_transform(self, features, boxes):
        x = self.pooler(features, boxes)
        return self.res5(x)

    def forward(self, images, features, proposals, targets=None):
        """
        See :meth:`ROIHeads.forward`.
        """
        del images

        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets

        proposal_boxes = [x.proposal_boxes for x in proposals]
        box_features = self._shared_roi_transform(
            [features[f] for f in self.in_features], proposal_boxes
        )
        predictions = self.box_predictor(box_features.mean(dim=[2, 3]))

        if self.training:
            del features
            losses = self.box_predictor.losses(predictions, proposals)
            if self.mask_on:
                proposals, fg_selection_masks = select_foreground_proposals(
                    proposals, self.num_classes
                )
                # Since the ROI feature transform is shared between boxes and masks,
                # we don't need to recompute features. The mask loss is only defined
                # on foreground proposals, so we need to select out the foreground
                # features.
                mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]
                del box_features
                losses.update(self.mask_head(mask_features, proposals))
            return [], losses
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}

    def forward_with_given_boxes(self, features, instances):
        """
        Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.

        Args:
            features: same as in `forward()`
            instances (list[Instances]): instances to predict other outputs. Expect the keys
                "pred_boxes" and "pred_classes" to exist.

        Returns:
            instances (Instances):
                the same `Instances` object, with extra
                fields such as `pred_masks` or `pred_keypoints`.
        """
        assert not self.training
        assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")

        if self.mask_on:
            features = [features[f] for f in self.in_features]
            x = self._shared_roi_transform(features, [x.pred_boxes for x in instances])
            return self.mask_head(x, instances)
        else:
            return instances


@ROI_HEADS_REGISTRY.register()
class StandardROIHeads(ROIHeads):
    """
    It's "standard" in a sense that there is no ROI transform sharing
    or feature sharing between tasks.
    Each head independently processes the input features by each head's
    own pooler and head.

    This class is used by most models, such as FPN and C5.
    To implement more models, you can subclass it and implement a different
    :meth:`forward()` or a head.
    """

    @configurable
    def __init__(
        self,
        *,
        box_in_features: List[str],
        box_pooler: ROIPooler,
        box_head: nn.Module,
        box_predictor: nn.Module,
        mask_in_features: Optional[List[str]] = None,
        mask_pooler: Optional[ROIPooler] = None,
        mask_head: Optional[nn.Module] = None,
        keypoint_in_features: Optional[List[str]] = None,
        keypoint_pooler: Optional[ROIPooler] = None,
        keypoint_head: Optional[nn.Module] = None,
        train_on_pred_boxes: bool = False,
        **kwargs
    ):
        """
        NOTE: this interface is experimental.

        Args:
            box_in_features (list[str]): list of feature names to use for the box head.
            box_pooler (ROIPooler): pooler to extract region features for box head
            box_head (nn.Module): transform features to make box predictions
            box_predictor (nn.Module): make box predictions from the feature.
                Should have the same interface as :class:`FastRCNNOutputLayers`.
            mask_in_features (list[str]): list of feature names to use for the mask head.
                None if not using mask head.
            mask_pooler (ROIPooler): pooler to extract region features for mask head
            mask_head (nn.Module): transform features to make mask predictions
            keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask*``.
            train_on_pred_boxes (bool): whether to use proposal boxes or
                predicted boxes from the box head to train other heads.
        """
        super().__init__(**kwargs)
        # keep self.in_features for backward compatibility
        self.in_features = self.box_in_features = box_in_features
        self.box_pooler = box_pooler
        self.box_head = box_head
        self.box_predictor = box_predictor

        self.mask_on = mask_in_features is not None
        if self.mask_on:
            self.mask_in_features = mask_in_features
            self.mask_pooler = mask_pooler
            self.mask_head = mask_head
        self.keypoint_on = keypoint_in_features is not None
        if self.keypoint_on:
            self.keypoint_in_features = keypoint_in_features
            self.keypoint_pooler = keypoint_pooler
            self.keypoint_head = keypoint_head

        self.train_on_pred_boxes = train_on_pred_boxes

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg)
        ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
        # Subclasses that have not been updated to use from_config style construction
        # may have overridden _init_*_head methods. In this case, those overridden methods
        # will not be classmethods and we need to avoid trying to call them here.
        # We test for this with ismethod which only returns True for bound methods of cls.
        # Such subclasses will need to handle calling their overridden _init_*_head methods.
        if inspect.ismethod(cls._init_box_head):
            ret.update(cls._init_box_head(cfg, input_shape))
        if inspect.ismethod(cls._init_mask_head):
            ret.update(cls._init_mask_head(cfg, input_shape))
        if inspect.ismethod(cls._init_keypoint_head):
            ret.update(cls._init_keypoint_head(cfg, input_shape))
        return ret

    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        # fmt: off
        in_features       = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio    = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type       = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        # fmt: on

        # If StandardROIHeads is applied on multiple feature maps (as in FPN),
        # then we share the same predictors and therefore the channel counts must be the same
        in_channels = [input_shape[f].channels for f in in_features]
        # Check all channel counts are equal
        assert len(set(in_channels)) == 1, in_channels
        in_channels = in_channels[0]

        box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        # Here we split "box head" and "box predictor", which is mainly due to historical reasons.
        # They are used together so the "box predictor" layers should be part of the "box head".
        # New subclasses of ROIHeads do not need "box predictor"s.
        box_head = build_box_head(
            cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
        )
        box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape)
        return {
            "box_in_features": in_features,
            "box_pooler": box_pooler,
            "box_head": box_head,
            "box_predictor": box_predictor,
        }

    @classmethod
    def _init_mask_head(cls, cfg, input_shape):
        if not cfg.MODEL.MASK_ON:
            return {}
        # fmt: off
        in_features       = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio    = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
        pooler_type       = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE
        # fmt: on

        in_channels = [input_shape[f].channels for f in in_features][0]

        ret = {"mask_in_features": in_features}
        ret["mask_pooler"] = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        ret["mask_head"] = build_mask_head(
            cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution)
        )
        return ret

    @classmethod
    def _init_keypoint_head(cls, cfg, input_shape):
        if not cfg.MODEL.KEYPOINT_ON:
            return {}
        # fmt: off
        in_features       = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION
        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in in_features)  # noqa
        sampling_ratio    = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO
        pooler_type       = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE
        # fmt: on

        in_channels = [input_shape[f].channels for f in in_features][0]

        ret = {"keypoint_in_features": in_features}
        ret["keypoint_pooler"] = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        ret["keypoint_head"] = build_keypoint_head(
            cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution)
        )
        return ret

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
    ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
        """
        See :class:`ROIHeads.forward`.
        """
        del images
        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets

        if self.training:
            losses = self._forward_box(features, proposals)
            # Usually the original proposals used by the box head are used by the mask, keypoint
            # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
            # predicted by the box head.
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return proposals, losses
        else:
            pred_instances = self._forward_box(features, proposals)
            # During inference cascaded prediction is used: the mask and keypoints heads are only
            # applied to the top scoring box detections.
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}

    def forward_with_given_boxes(
        self, features: Dict[str, torch.Tensor], instances: List[Instances]
    ) -> List[Instances]:
        """
        Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.

        This is useful for downstream tasks where a box is known, but need to obtain
        other attributes (outputs of other heads).
        Test-time augmentation also uses this.

        Args:
            features: same as in `forward()`
            instances (list[Instances]): instances to predict other outputs. Expect the keys
                "pred_boxes" and "pred_classes" to exist.

        Returns:
            instances (list[Instances]):
                the same `Instances` objects, with extra
                fields such as `pred_masks` or `pred_keypoints`.
        """
        assert not self.training
        assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")

        instances = self._forward_mask(features, instances)
        instances = self._forward_keypoint(features, instances)
        return instances

    def _forward_box(
        self, features: Dict[str, torch.Tensor], proposals: List[Instances]
    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
        """
        Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
            the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.

        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
                Each has fields "proposal_boxes", and "objectness_logits",
                "gt_classes", "gt_boxes".

        Returns:
            In training, a dict of losses.
            In inference, a list of `Instances`, the predicted instances.
        """
        features = [features[f] for f in self.box_in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        box_features = self.box_head(box_features)
        predictions = self.box_predictor(box_features)
        del box_features

        if self.training:
            losses = self.box_predictor.losses(predictions, proposals)
            # proposals is modified in-place below, so losses must be computed first.
            if self.train_on_pred_boxes:
                with torch.no_grad():
                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
                        predictions, proposals
                    )
                    for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
                        proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
            return losses
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            return pred_instances

    def _forward_mask(
        self, features: Dict[str, torch.Tensor], instances: List[Instances]
    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
        """
        Forward logic of the mask prediction branch.

        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            instances (list[Instances]): the per-image instances to train/predict masks.
                In training, they can be the proposals.
                In inference, they can be the predicted boxes.

        Returns:
            In training, a dict of losses.
            In inference, update `instances` with new fields "pred_masks" and return it.
        """
        if not self.mask_on:
            return {} if self.training else instances
        features = [features[f] for f in self.mask_in_features]

        if self.training:
            # The loss is only defined on positive proposals.
            proposals, _ = select_foreground_proposals(instances, self.num_classes)
            proposal_boxes = [x.proposal_boxes for x in proposals]
            mask_features = self.mask_pooler(features, proposal_boxes)
            return self.mask_head(mask_features, proposals)
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            mask_features = self.mask_pooler(features, pred_boxes)
            return self.mask_head(mask_features, instances)

    def _forward_keypoint(
        self, features: Dict[str, torch.Tensor], instances: List[Instances]
    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
        """
        Forward logic of the keypoint prediction branch.

        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            instances (list[Instances]): the per-image instances to train/predict keypoints.
                In training, they can be the proposals.
                In inference, they can be the predicted boxes.

        Returns:
            In training, a dict of losses.
            In inference, update `instances` with new fields "pred_keypoints" and return it.
        """
        if not self.keypoint_on:
            return {} if self.training else instances
        features = [features[f] for f in self.keypoint_in_features]

        if self.training:
            # The loss is defined on positive proposals with >=1 visible keypoints.
            proposals, _ = select_foreground_proposals(instances, self.num_classes)
            proposals = select_proposals_with_visible_keypoints(proposals)
            proposal_boxes = [x.proposal_boxes for x in proposals]

            keypoint_features = self.keypoint_pooler(features, proposal_boxes)
            return self.keypoint_head(keypoint_features, proposals)
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            keypoint_features = self.keypoint_pooler(features, pred_boxes)
            return self.keypoint_head(keypoint_features, instances)
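# --- Illustrative sketch (not part of this file) ---
# A hedged example of wiring the registry above to a config: build_roi_heads looks up
# cfg.MODEL.ROI_HEADS.NAME and instantiates it with the backbone's output shapes.
# The config keys set here are the standard detectron2 defaults overridden so that
# StandardROIHeads can build its FC box head; this is a sketch, not a recommended config.
from detectron2.config import get_cfg
from detectron2.modeling import build_backbone
from detectron2.modeling.roi_heads import build_roi_heads

cfg = get_cfg()
cfg.MODEL.ROI_HEADS.NAME = "StandardROIHeads"
cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
backbone = build_backbone(cfg)                       # default ResNet-50, outputs "res4"
roi_heads = build_roi_heads(cfg, backbone.output_shape())
print(type(roi_heads).__name__)                      # StandardROIHeads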
detectron2/modeling/roi_heads/rotated_fast_rcnn.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import
logging
import
numpy
as
np
import
torch
from
detectron2.config
import
configurable
from
detectron2.layers
import
ShapeSpec
,
batched_nms_rotated
from
detectron2.structures
import
Instances
,
RotatedBoxes
,
pairwise_iou_rotated
from
detectron2.utils.events
import
get_event_storage
from
..box_regression
import
Box2BoxTransformRotated
from
..poolers
import
ROIPooler
from
..proposal_generator.proposal_utils
import
add_ground_truth_to_proposals
from
.box_head
import
build_box_head
from
.fast_rcnn
import
FastRCNNOutputLayers
from
.roi_heads
import
ROI_HEADS_REGISTRY
,
StandardROIHeads
logger
=
logging
.
getLogger
(
__name__
)
"""
Shape shorthand in this module:
N: number of images in the minibatch
R: number of ROIs, combined over all images, in the minibatch
Ri: number of ROIs in image i
K: number of foreground classes. E.g.,there are 80 foreground classes in COCO.
Naming convention:
deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box
transform (see :class:`box_regression.Box2BoxTransformRotated`).
pred_class_logits: predicted class scores in [-inf, +inf]; use
softmax(pred_class_logits) to estimate P(class).
gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
foreground object classes and K represents the background class.
pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals
to detection box predictions.
gt_proposal_deltas: ground-truth rotated box2box transform deltas
"""
def
fast_rcnn_inference_rotated
(
boxes
,
scores
,
image_shapes
,
score_thresh
,
nms_thresh
,
topk_per_image
):
"""
Call `fast_rcnn_inference_single_image_rotated` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 5) if doing
class-specific regression, or (Ri, 5) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`.
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
that stores the topk most confidence detections.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
"""
    result_per_image = [
        fast_rcnn_inference_single_image_rotated(
            boxes_per_image, scores_per_image, image_shape,
            score_thresh, nms_thresh, topk_per_image
        )
        for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
    ]
    return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
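A minimal usage sketch (editor's addition, not part of this file): calling the helper above on dummy per-image predictions for a two-image batch. Tensor shapes follow the docstring; the threshold values are arbitrary examples.

# Editor's illustrative sketch: batched rotated inference on random predictions.
import torch

K = 80  # number of foreground classes, as in COCO
pred_boxes = [torch.rand(100, K * 5) * 50, torch.rand(64, K * 5) * 50]   # class-specific rotated boxes
pred_scores = [torch.rand(100, K + 1), torch.rand(64, K + 1)]            # scores incl. background column
image_shapes = [(480, 640), (600, 800)]                                  # (height, width) per image

instances, kept_indices = fast_rcnn_inference_rotated(
    pred_boxes, pred_scores, image_shapes,
    score_thresh=0.05, nms_thresh=0.5, topk_per_image=100,
)
print(len(instances), instances[0].pred_boxes.tensor.shape, kept_indices[0].shape)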
def fast_rcnn_inference_single_image_rotated(
    boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
):
    """
    Single-image inference. Return rotated bounding-box detection results by thresholding
    on scores and applying rotated non-maximum suppression (Rotated NMS).

    Args:
        Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes
        per image.

    Returns:
        Same as `fast_rcnn_inference_rotated`, but for only one image.
    """
    valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
    if not valid_mask.all():
        boxes = boxes[valid_mask]
        scores = scores[valid_mask]

    B = 5  # box dimension
    scores = scores[:, :-1]
    num_bbox_reg_classes = boxes.shape[1] // B
    # Convert to Boxes to use the `clip` function ...
    boxes = RotatedBoxes(boxes.reshape(-1, B))
    boxes.clip(image_shape)
    boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B)  # R x C x B

    # Filter results based on detection scores
    filter_mask = scores > score_thresh  # R x K
    # R' x 2. First column contains indices of the R predictions;
    # Second column contains indices of classes.
    filter_inds = filter_mask.nonzero()
    if num_bbox_reg_classes == 1:
        boxes = boxes[filter_inds[:, 0], 0]
    else:
        boxes = boxes[filter_mask]
    scores = scores[filter_mask]

    # Apply per-class Rotated NMS
    keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh)
    if topk_per_image >= 0:
        keep = keep[:topk_per_image]
    boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]

    result = Instances(image_shape)
    result.pred_boxes = RotatedBoxes(boxes)
    result.scores = scores
    result.pred_classes = filter_inds[:, 1]
    return result, filter_inds[:, 0]
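For readers unfamiliar with the 5-d format used here, the following sketch (editor's addition) shows the (x_center, y_center, width, height, angle-in-degrees) layout that `RotatedBoxes` expects, and the effect of `clip`, which only clips nearly axis-aligned boxes:

# Editor's illustrative sketch: the rotated-box layout consumed above.
import torch
from detectron2.structures import RotatedBoxes

rb = RotatedBoxes(torch.tensor([[100.0, 80.0, 60.0, 30.0, 45.0],
                                [700.0, 200.0, 50.0, 50.0, 0.0]]))
rb.clip((480, 640))              # (height, width); only near-horizontal boxes are clipped
print(rb.tensor, rb.area())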
class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers):
    """
    Two linear layers for predicting Rotated Fast R-CNN outputs.
    """

    @classmethod
    def from_config(cls, cfg, input_shape):
        args = super().from_config(cfg, input_shape)
        args["box2box_transform"] = Box2BoxTransformRotated(
            weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
        )
        return args
    def inference(self, predictions, proposals):
        """
        Returns:
            list[Instances]: same as `fast_rcnn_inference_rotated`.
            list[Tensor]: same as `fast_rcnn_inference_rotated`.
        """
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        image_shapes = [x.image_size for x in proposals]
        return fast_rcnn_inference_rotated(
            boxes,
            scores,
            image_shapes,
            self.test_score_thresh,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )
@ROI_HEADS_REGISTRY.register()
class RROIHeads(StandardROIHeads):
    """
    This class is used by Rotated Fast R-CNN to detect rotated boxes.
    For now, it only supports box predictions but not mask or keypoints.
    """

    @configurable
    def __init__(self, **kwargs):
        """
        NOTE: this interface is experimental.
        """
        super().__init__(**kwargs)
        assert (
            not self.mask_on and not self.keypoint_on
        ), "Mask/Keypoints not supported in Rotated ROIHeads."
        assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!"
    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        # fmt: off
        in_features       = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio    = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type       = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        # fmt: on
        assert pooler_type in ["ROIAlignRotated"], pooler_type
        # assume all channel counts are equal
        in_channels = [input_shape[f].channels for f in in_features][0]

        box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        box_head = build_box_head(
            cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
        )
        # This line is the only difference v.s. StandardROIHeads
        box_predictor = RotatedFastRCNNOutputLayers(cfg, box_head.output_shape)
        return {
            "box_in_features": in_features,
            "box_pooler": box_pooler,
            "box_head": box_head,
            "box_predictor": box_predictor,
        }
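As an aside (editor's addition, not part of the class above), a plausible configuration fragment that selects this head and satisfies the `ROIAlignRotated` assertion, expressed through detectron2's config API; the values are illustrative, not this repository's defaults:

# Editor's illustrative sketch: config keys a rotated ROI head setup would touch.
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.MODEL.ROI_HEADS.NAME = "RROIHeads"
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated"
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0, 1.0)   # 5 weights for (dx, dy, dw, dh, da)
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"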
    @torch.no_grad()
    def label_and_sample_proposals(self, proposals, targets):
"""
Prepare some proposals to be used to train the RROI heads.
It performs box matching between `proposals` and `targets`, and assigns
training labels to the proposals.
It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
with a fraction of positives that is no larger than `self.positive_sample_fraction.
Args:
See :meth:`StandardROIHeads.forward`
Returns:
list[Instances]: length `N` list of `Instances`s containing the proposals
sampled for training. Each `Instances` has the following fields:
- proposal_boxes: the rotated proposal boxes
- gt_boxes: the ground-truth rotated boxes that the proposal is assigned to
(this is only meaningful if the proposal has a label > 0; if label = 0
then the ground-truth box is random)
- gt_classes: the ground-truth classification lable for each proposal
"""
        gt_boxes = [x.gt_boxes for x in targets]
        if self.proposal_append_gt:
            proposals = add_ground_truth_to_proposals(gt_boxes, proposals)

        proposals_with_gt = []

        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            match_quality_matrix = pairwise_iou_rotated(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            sampled_idxs, gt_classes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes
            )

            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes

            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets]
            else:
                gt_boxes = RotatedBoxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 5))
                )
                proposals_per_image.gt_boxes = gt_boxes

            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)

        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))

        return proposals_with_gt
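The matching step above is driven by a rotated IoU matrix; the sketch below (editor's addition, not part of this file) shows its orientation — rows are ground-truth boxes, columns are proposals — on two toy boxes:

# Editor's illustrative sketch: the IoU matrix fed to the proposal matcher.
import torch
from detectron2.structures import RotatedBoxes, pairwise_iou_rotated

gt = RotatedBoxes(torch.tensor([[100.0, 100.0, 60.0, 40.0, 0.0]]))
props = RotatedBoxes(torch.tensor([[100.0, 100.0, 60.0, 40.0, 10.0],
                                   [300.0, 300.0, 60.0, 40.0, 0.0]]))
iou = pairwise_iou_rotated(gt, props)   # shape (1, 2): high overlap, then ~0
print(iou)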
detectron2/modeling/sampling.py
0 → 100644
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch

__all__ = ["subsample_labels"]
def subsample_labels(labels, num_samples, positive_fraction, bg_label):
"""
Return `num_samples` (or fewer, if not enough found)
random samples from `labels` which is a mixture of positives & negatives.
It will try to return as many positives as possible without
exceeding `positive_fraction * num_samples`, and then try to
fill the remaining slots with negatives.
Args:
labels (Tensor): (N, ) label vector with values:
* -1: ignore
* bg_label: background ("negative") class
* otherwise: one or more foreground ("positive") classes
num_samples (int): The total number of labels with value >= 0 to return.
Values that are not sampled will be filled with -1 (ignore).
positive_fraction (float): The number of subsampled labels with values > 0
is `min(num_positives, int(positive_fraction * num_samples))`. The number
of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.
In order words, if there are not enough positives, the sample is filled with
negatives. If there are also not enough negatives, then as many elements are
sampled as is possible.
bg_label (int): label index of background ("negative") class.
Returns:
pos_idx, neg_idx (Tensor):
1D vector of indices. The total length of both is `num_samples` or fewer.
"""
    positive = torch.nonzero((labels != -1) & (labels != bg_label), as_tuple=True)[0]
    negative = torch.nonzero(labels == bg_label, as_tuple=True)[0]

    num_pos = int(num_samples * positive_fraction)
    # protect against not enough positive examples
    num_pos = min(positive.numel(), num_pos)
    num_neg = num_samples - num_pos
    # protect against not enough negative examples
    num_neg = min(negative.numel(), num_neg)

    # randomly select positive and negative examples
    perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
    perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]

    pos_idx = positive[perm1]
    neg_idx = negative[perm2]
    return pos_idx, neg_idx
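A quick usage example (editor's addition, not part of this file) on a toy label vector; with `bg_label=0`, values greater than 0 are foreground, 0 is background, and -1 is ignored:

# Editor's illustrative sketch: subsampling 4 labels with at most half positives.
import torch

labels = torch.tensor([-1, 0, 2, 0, 1, 0, 3, -1, 0, 2])
pos_idx, neg_idx = subsample_labels(labels, num_samples=4, positive_fraction=0.5, bg_label=0)
print(labels[pos_idx], labels[neg_idx])   # e.g. two foreground labels and two zeros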