# mvx_faster_rcnn.py
import torch
import torch.nn.functional as F

from mmdet.models import DETECTORS
from .mvx_two_stage import MVXTwoStageDetector


@DETECTORS.register_module()
class DynamicMVXFasterRCNN(MVXTwoStageDetector):
    """Multi-modality VoxelNet using Faster R-CNN with dynamic voxelization."""

    def __init__(self, **kwargs):
        super(DynamicMVXFasterRCNN, self).__init__(**kwargs)

    def extract_pts_feat(self, points, img_feats, img_metas):
        """Extract features of points, optionally fusing image features first."""
        if not self.with_pts_bbox:
            return None
        voxels, coors = self.voxelize(points)
        # adopt an early fusion strategy
        if self.with_fusion:
            voxels = self.pts_fusion_layer(img_feats, points, voxels,
                                           img_metas)
        voxel_features, feature_coors = self.pts_voxel_encoder(voxels, coors)
        batch_size = coors[-1, 0] + 1
        x = self.pts_middle_encoder(voxel_features, feature_coors, batch_size)
        x = self.pts_backbone(x)
        if self.with_pts_neck:
            x = self.pts_neck(x)
        return x

    @torch.no_grad()
    def voxelize(self, points):
        """Apply dynamic voxelization to points.

        Returns the concatenated raw points of the batch together with the
        per-point voxel coordinates (batch index prepended as column 0).
        """
        coors = []
        # dynamic voxelization only provides a per-point coordinate mapping
        for res in points:
            res_coors = self.pts_voxel_layer(res)
            coors.append(res_coors)
        points = torch.cat(points, dim=0)
        coors_batch = []
        for i, coor in enumerate(coors):
            # prepend the sample index as the first coordinate column
            coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)
            coors_batch.append(coor_pad)
        coors_batch = torch.cat(coors_batch, dim=0)
        return points, coors_batch
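

# Illustrative sketch, not part of the original module: a toy, standalone
# version of how DynamicMVXFasterRCNN.voxelize attaches the sample (batch)
# index to the per-point voxel coordinates. The coordinate values are made
# up; only the F.pad pattern mirrors the method above.
def _demo_dynamic_coors_batching():
    # two samples with 2 and 3 voxelized points respectively
    coors = [
        torch.tensor([[0, 1, 2], [0, 1, 3]]),
        torch.tensor([[1, 0, 0], [1, 2, 2], [1, 2, 3]]),
    ]
    coors_batch = []
    for i, coor in enumerate(coors):
        # prepend the sample index as the first column
        coors_batch.append(F.pad(coor, (1, 0), mode='constant', value=i))
    # rows of sample 0 now start with 0, rows of sample 1 with 1
    return torch.cat(coors_batch, dim=0)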


@DETECTORS.register_module()
class DynamicMVXFasterRCNNV2(DynamicMVXFasterRCNN):
    """Dynamic MVX Faster R-CNN variant that passes image features directly
    into the point voxel encoder for fusion."""

    def __init__(self, **kwargs):
        super(DynamicMVXFasterRCNNV2, self).__init__(**kwargs)

    def extract_pts_feat(self, points, img_feats, img_metas):
        """Extract features of points; image features go to the voxel encoder."""
        if not self.with_pts_bbox:
            return None
        voxels, coors = self.voxelize(points)
        voxel_features, feature_coors = self.pts_voxel_encoder(
            voxels, coors, points, img_feats, img_metas)
        batch_size = coors[-1, 0] + 1
        x = self.pts_middle_encoder(voxel_features, feature_coors, batch_size)
        x = self.pts_backbone(x)
        if self.with_pts_neck:
            x = self.pts_neck(x)
        return x


@DETECTORS.register_module()
class MVXFasterRCNNV2(MVXTwoStageDetector):
    """MVX Faster R-CNN using hard voxelization and an image-aware point
    voxel encoder."""

    def __init__(self, **kwargs):
        super(MVXFasterRCNNV2, self).__init__(**kwargs)

    def extract_pts_feat(self, pts, img_feats, img_metas):
        """Extract features of points."""
        if not self.with_pts_bbox:
            return None
        voxels, num_points, coors = self.voxelize(pts)
        voxel_features = self.pts_voxel_encoder(voxels, num_points, coors,
                                                img_feats, img_metas)

        batch_size = coors[-1, 0] + 1
        x = self.pts_middle_encoder(voxel_features, coors, batch_size)
        x = self.pts_backbone(x)

        if self.with_pts_neck:
            x = self.pts_neck(x)
        return x
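

# Illustrative sketch, not part of the original module: the interface
# difference between the two voxelization schemes used in this file.
# MVXFasterRCNNV2 relies on hard voxelization (fixed-size voxels returned as
# voxels / num_points / coors), while the dynamic detectors above keep every
# raw point and only compute a per-point coordinate mapping. All shapes below
# are made-up toy values, not the library's actual defaults.
def _demo_voxelization_interfaces():
    # hard voxelization: dense voxels padded to a fixed number of points
    voxels = torch.zeros(4, 32, 4)               # (num_voxels, max_points, point_dim)
    num_points = torch.tensor([32, 7, 1, 15])    # valid points in each voxel
    coors = torch.zeros(4, 4, dtype=torch.long)  # (batch_idx, *voxel_coord) per voxel
    # dynamic voxelization: all raw points plus one coordinate row per point
    points = torch.zeros(100, 4)
    point_coors = torch.zeros(100, 4, dtype=torch.long)
    return (voxels, num_points, coors), (points, point_coors)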