import numpy as np
import torch
import torch.nn as nn
from mmdet3d.models.builder import build_backbone, build_head, build_neck
from torch.nn.utils.rnn import pad_sequence
from torchvision.models.resnet import resnet18

from .base_mapper import MAPPERS, BaseMapper


@MAPPERS.register_module()
class VectorMapNet(BaseMapper):
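    """Vectorized map construction model.

    Builds a BEV feature backbone, a convolutional neck, and a map element
    head (detection, optionally followed by polyline generation when
    ``only_det`` is False).
    """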

    def __init__(self,
                 backbone_cfg=dict(),
                 head_cfg=dict(
                     vert_net_cfg=dict(),
                     face_net_cfg=dict(),
                 ),
                 neck_input_channels=128,
                 neck_cfg=None,
                 with_auxiliary_head=False,
                 only_det=False,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 model_name=None, **kwargs):
        super(VectorMapNet, self).__init__()

        # Attributes
        self.model_name = model_name
        self.last_epoch = None
        self.only_det = only_det

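        # BEV backbone: encodes multi-view images (and optional points) into a BEV feature map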
        self.backbone = build_backbone(backbone_cfg)

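        # Neck: a configurable multi-scale backbone + neck if provided, otherwise a light ResNet-18 stem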
        if neck_cfg is not None:
            self.neck_neck = build_backbone(neck_cfg.backbone)
            self.neck_neck.conv1 = nn.Conv2d(
                neck_input_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
            self.neck_project = build_neck(neck_cfg.neck)
            self.neck = self.multiscale_neck
        else:
            trunk = resnet18(pretrained=False, zero_init_residual=True)
            self.neck = nn.Sequential(
                nn.Conv2d(neck_input_channels, 64, kernel_size=(7, 7), stride=(
                    2, 2), padding=(3, 3), bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1,
                             dilation=1, ceil_mode=False),
                trunk.layer1,
                nn.Conv2d(64, 128, kernel_size=1, bias=False),
            )

        # BEV
        if hasattr(self.backbone, 'bev_w'):
            self.bev_w = self.backbone.bev_w
            self.bev_h = self.backbone.bev_h

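        # Map element head; `only_det` restricts it to the detection stage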
        self.head = build_head(head_cfg)

    def multiscale_neck(self, bev_embedding):
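        """Run the neck backbone over the BEV embedding and project it to multi-scale features."""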

        multi_feat = self.neck_neck(bev_embedding)
        multi_feat = self.neck_project(multi_feat)

        return multi_feat

    def forward_train(self, img, polys, points=None, img_metas=None, **kwargs):
        '''
        Args:
            img: torch.Tensor of shape [B, N, 3, H, W]
                N: number of cams
            polys: list of per-sample annotation dicts, len(polys) = batch_size.
                Each dict holds np.ndarray entries (or lists of np.ndarray)
                consumed by format_det / format_gen, e.g. 'det_label',
                'keypoint', 'gen_label', 'qkeypoint', 'polylines',
                'polyline_masks', 'polyline_weights'.
            points: optional list of per-sample point tensors.
            img_metas:
                img_metas['lidar2img']: [B, N, 4, 4]
        Returns:
            loss, log_vars, num_sample
        '''
        #  prepare labels and images
        batch, img, img_metas, valid_idx, points = self.batch_data(
            polys, img, img_metas, img.device, points)

        # corner case: cache the last valid batch so a sample without vectors cannot crash training
        if self.last_epoch is None:
            self.last_epoch = [batch, img, img_metas, valid_idx, points]

        if len(valid_idx) == 0:
            batch, img, img_metas, valid_idx, points = self.last_epoch
        else:
            self.last_epoch = [batch, img, img_metas, valid_idx, points]

        # Backbone
        _bev_feats = self.backbone(img, img_metas=img_metas, points=points)
        img_shape = [_bev_feats.shape[2:] for _ in range(_bev_feats.shape[0])]

        # Neck
        bev_feats = self.neck(_bev_feats)

        preds_dict, losses_dict = \
            self.head(batch,
                      context={
                          'bev_embeddings': bev_feats,
                          'batch_input_shape': _bev_feats.shape[2:],
                          'img_shape': img_shape,
                          'raw_bev_embeddings': _bev_feats},
                      only_det=self.only_det)

        # format outputs
        loss = 0
        for name, var in losses_dict.items():
            loss = loss + var

        # update the log
        log_vars = {k: v.item() for k, v in losses_dict.items()}
        log_vars.update({'total': loss.item()})

        num_sample = img.size(0)

        return loss, log_vars, num_sample

    @torch.no_grad()
    def forward_test(self, img, polys=None, points=None, img_metas=None, **kwargs):
        '''
        Inference pipeline: backbone -> neck -> head -> post_process,
        returning one result (or None) per input sample token.
        '''

        # collect sample tokens for post-processing
        token = []
        for img_meta in img_metas:
            token.append(img_meta['token'])

        _bev_feats = self.backbone(img, img_metas, points=points)
        img_shape = [_bev_feats.shape[2:] for i in range(_bev_feats.shape[0])]
        # Neck
        bev_feats = self.neck(_bev_feats)

        context = {'bev_embeddings': bev_feats,
                   'batch_input_shape': _bev_feats.shape[2:],
                   'img_shape': img_shape,  # XXX
                   'raw_bev_embeddings': _bev_feats}

        preds_dict = self.head(batch={},
                               context=context,
                               condition_on_det=True,
                               gt_condition=False,
                               only_det=self.only_det)

        # fallback: the head produced no predictions to decode
        if preds_dict is None:
            return [None]

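        # decode raw head outputs into per-sample vectorized map results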
        results_list = self.head.post_process(preds_dict, token, only_det=self.only_det)

        return results_list

    def batch_data(self, polys, imgs, img_metas, device, points=None):
        # drop samples that contain no vectors
        valid_idx = [i for i in range(len(polys)) if len(polys[i])]
        imgs = imgs[valid_idx]
        img_metas = [img_metas[i] for i in valid_idx]

        polys = [polys[i] for i in valid_idx]

        if points is not None:
            points = [points[i] for i in valid_idx]
            points = self.batch_points(points)

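        # nothing valid in this batch; the caller falls back to its cached last batch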
        if len(valid_idx) == 0:
            return None, None, None, valid_idx, None

        batch = {}
        batch['det'] = format_det(polys, device)
        batch['gen'] = format_gen(polys, device)

        return batch, imgs, img_metas, valid_idx, points

    def batch_points(self, points):

        pad_points = pad_sequence(points, batch_first=True)

        points_mask = torch.zeros_like(pad_points[:, :, 0]).bool()
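        # mark the valid (non-padded) points of every sample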
        for i in range(len(points)):
            valid_num = points[i].shape[0]
            points_mask[i][:valid_num] = True

        return (pad_points, points_mask)


def format_det(polys, device):
    batch = {
        'class_label': [],
        'batch_idx': [],
        'bbox': [],
    }

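    # gather per-sample class labels and keypoints as device tensors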
    for batch_idx, poly in enumerate(polys):
        keypoint_label = torch.from_numpy(poly['det_label']).to(device)
        keypoint = torch.from_numpy(poly['keypoint']).to(device)

        batch['class_label'].append(keypoint_label)
        batch['bbox'].append(keypoint)

    return batch


def format_gen(polys, device):
    polylines, polyline_masks, polyline_weights = [], [], []
    bbox, line_cls, line_bs_idx = [], [], []

    for batch_idx, poly in enumerate(polys):

        # convert numpy arrays to tensors on the target device
        for k in poly.keys():
            if isinstance(poly[k], np.ndarray):
                poly[k] = torch.from_numpy(poly[k]).to(device)
            else:
                poly[k] = [torch.from_numpy(v).to(device) for v in poly[k]]

        line_cls += poly['gen_label']
        line_bs_idx += [batch_idx] * len(poly['gen_label'])

        # conditioning keypoints for the generation stage
        bbox += poly['qkeypoint']

        # polyline generation targets
        polylines += poly['polylines']
        polyline_masks += poly['polyline_masks']
        polyline_weights += poly['polyline_weights']

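    # flatten per-sample lists into batched tensors; variable-length polylines are zero-padded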
    batch = {}
    batch['lines_bs_idx'] = torch.tensor(
        line_bs_idx, dtype=torch.long, device=device)
    batch['lines_cls'] = torch.tensor(
        line_cls, dtype=torch.long, device=device)
    batch['bbox_flat'] = torch.stack(bbox, 0)

    # padding
    batch['polylines'] = pad_sequence(polylines, batch_first=True)
    batch['polyline_masks'] = pad_sequence(polyline_masks, batch_first=True)
    batch['polyline_weights'] = pad_sequence(polyline_weights, batch_first=True)

    return batch