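"""Test-time mixins for two-stage detectors.

RPNTestMixin, BBoxTestMixin and MaskTestMixin implement the proposal, bbox
and mask branches of single-image and augmented (multi-scale / flip)
testing. They are meant to be mixed into a detector class that provides the
rpn_head, bbox_head, mask_head, RoI extractors and test_cfg referenced
below.
"""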
import torch

from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
                        merge_aug_masks, merge_aug_proposals, multiclass_nms)


class RPNTestMixin(object):

    def simple_test_rpn(self, x, img_meta, rpn_test_cfg):
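        """Test RPN proposals without augmentation.

        Args:
            x (tuple[Tensor]): Multi-level feature maps of the batch.
            img_meta (list[dict]): Meta info of each image in the batch.
            rpn_test_cfg (dict): Test config of the RPN.

        Returns:
            list[Tensor]: Proposals of each image, each of shape (n, 5)
                with a score in the last column.
        """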
        rpn_outs = self.rpn_head(x)
        proposal_inputs = rpn_outs + (img_meta, rpn_test_cfg)
        proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
        return proposal_list

    def aug_test_rpn(self, feats, img_metas, rpn_test_cfg):
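        """Test RPN proposals with test-time augmentation.

        Args:
            feats (list[tuple[Tensor]]): Feature maps of each augmentation.
            img_metas (list[list[dict]]): Meta info of each augmentation,
                with the outer list over augmentations and the inner list
                over images in the batch.
            rpn_test_cfg (dict): Test config of the RPN.

        Returns:
            list[Tensor]: Merged proposals of each image, rescaled to the
                original image size.
        """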
        imgs_per_gpu = len(img_metas[0])
        aug_proposals = [[] for _ in range(imgs_per_gpu)]
        for x, img_meta in zip(feats, img_metas):
            proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg)
            for i, proposals in enumerate(proposal_list):
                aug_proposals[i].append(proposals)
        # reorganize the order of 'img_metas' to match the dimensions
        # of 'aug_proposals'
        aug_img_metas = []
        for i in range(imgs_per_gpu):
            aug_img_meta = []
            for j in range(len(img_metas)):
                aug_img_meta.append(img_metas[j][i])
            aug_img_metas.append(aug_img_meta)
        # after merging, proposals will be rescaled to the original image size
        merged_proposals = [
            merge_aug_proposals(proposals, aug_img_meta, rpn_test_cfg)
            for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
        ]
        return merged_proposals


class BBoxTestMixin(object):

    def simple_test_bboxes(self,
                           x,
                           img_meta,
                           proposals,
                           rcnn_test_cfg,
                           rescale=False):
        """Test only det bboxes without augmentation."""
        rois = bbox2roi(proposals)
        roi_feats = self.bbox_roi_extractor(
            x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
        if self.with_shared_head:
            roi_feats = self.shared_head(roi_feats)
        cls_score, bbox_pred = self.bbox_head(roi_feats)
        img_shape = img_meta[0]['img_shape']
        scale_factor = img_meta[0]['scale_factor']
        det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
            rois,
            cls_score,
            bbox_pred,
            img_shape,
            scale_factor,
            rescale=rescale,
            cfg=rcnn_test_cfg)
        return det_bboxes, det_labels

    def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
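        """Test det bboxes with test-time augmentation.

        Proposals are kept in the original image frame; they are mapped
        into each augmented image, classified and regressed there, and the
        per-augmentation results are merged and post-processed with NMS.
        """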
        aug_bboxes = []
        aug_scores = []
        for x, img_meta in zip(feats, img_metas):
            # only one image in the batch
            img_shape = img_meta[0]['img_shape']
            scale_factor = img_meta[0]['scale_factor']
            flip = img_meta[0]['flip']
            # TODO more flexible
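            # map the proposals from the original image frame into this
            # augmented image (resize + flip) so they align with its
            # feature maps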
            proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
                                     scale_factor, flip)
            rois = bbox2roi([proposals])
            # recompute feature maps to save GPU memory
            roi_feats = self.bbox_roi_extractor(
                x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
            if self.with_shared_head:
                roi_feats = self.shared_head(roi_feats)
            cls_score, bbox_pred = self.bbox_head(roi_feats)
            bboxes, scores = self.bbox_head.get_det_bboxes(
                rois,
                cls_score,
                bbox_pred,
                img_shape,
                scale_factor,
                rescale=False,
                cfg=None)
            aug_bboxes.append(bboxes)
            aug_scores.append(scores)
        # after merging, bboxes will be rescaled to the original image size
        merged_bboxes, merged_scores = merge_aug_bboxes(
            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
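        # score thresholding and NMS on the merged, original-scale boxes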
        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                                rcnn_test_cfg.score_thr,
                                                rcnn_test_cfg.nms,
                                                rcnn_test_cfg.max_per_img)
        return det_bboxes, det_labels


class MaskTestMixin(object):

    def simple_test_mask(self,
                         x,
                         img_meta,
                         det_bboxes,
                         det_labels,
                         rescale=False):
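        """Test mask results without augmentation.

        Returns:
            list[list]: Segmentation results grouped by predicted class,
                one list per foreground class.
        """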
        # image shape of the first image in the batch (only one)
        ori_shape = img_meta[0]['ori_shape']
        scale_factor = img_meta[0]['scale_factor']
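        # no detections: return an empty list for each foreground class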
        if det_bboxes.shape[0] == 0:
            segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
        else:
            # if det_bboxes is rescaled to the original image size, we need to
            # rescale it back to the testing scale to obtain RoIs.
            if rescale and not isinstance(scale_factor, float):
                scale_factor = torch.from_numpy(scale_factor).to(
                    det_bboxes.device)
            _bboxes = (
                det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
            mask_rois = bbox2roi([_bboxes])
            mask_feats = self.mask_roi_extractor(
                x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois)
            if self.with_shared_head:
                mask_feats = self.shared_head(mask_feats)
            mask_pred = self.mask_head(mask_feats)
            segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes,
                                                       det_labels,
                                                       self.test_cfg.rcnn,
                                                       ori_shape, scale_factor,
                                                       rescale)
        return segm_result

    def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
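        """Test mask results with test-time augmentation.

        det_bboxes are expected in the original image frame; they are
        mapped into each augmented image, the predicted masks are merged
        across augmentations, and the merged masks are decoded at the
        original image size.
        """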
        if det_bboxes.shape[0] == 0:
            segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
        else:
            aug_masks = []
            for x, img_meta in zip(feats, img_metas):
                img_shape = img_meta[0]['img_shape']
                scale_factor = img_meta[0]['scale_factor']
                flip = img_meta[0]['flip']
                _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                       scale_factor, flip)
                mask_rois = bbox2roi([_bboxes])
                mask_feats = self.mask_roi_extractor(
                    x[:len(self.mask_roi_extractor.featmap_strides)],
                    mask_rois)
                if self.with_shared_head:
                    mask_feats = self.shared_head(mask_feats)
                mask_pred = self.mask_head(mask_feats)
                # convert to numpy array to save memory
                aug_masks.append(mask_pred.sigmoid().cpu().numpy())
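            # merge_aug_masks maps flipped predictions back and averages
            # the mask probabilities over all augmentations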
            merged_masks = merge_aug_masks(aug_masks, img_metas,
                                           self.test_cfg.rcnn)

            ori_shape = img_metas[0][0]['ori_shape']
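            # det_bboxes are already in the original image frame, so decode
            # the merged masks with scale_factor=1.0 and rescale=False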
            segm_result = self.mask_head.get_seg_masks(
                merged_masks,
                det_bboxes,
                det_labels,
                self.test_cfg.rcnn,
                ori_shape,
                scale_factor=1.0,
                rescale=False)
        return segm_result