# vectormapnet.py
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from torchvision.models.resnet import resnet18

from mmdet3d.models.builder import (build_backbone, build_head,
                                    build_neck)

from .base_mapper import BaseMapper, MAPPERS


@MAPPERS.register_module()
class VectorMapNet(BaseMapper):
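    """Vectorized HD map construction model.

    A BEV backbone encodes the multi-view images (and, optionally, LiDAR
    points) into a BEV embedding, a neck refines it, and the head performs
    map-element detection plus polyline generation.
    """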

    def __init__(self,
                 backbone_cfg=dict(),
                 head_cfg=dict(
                     vert_net_cfg=dict(),
                     face_net_cfg=dict(),
                 ),
                 neck_input_channels=128,
                 neck_cfg=None,
                 with_auxiliary_head=False,
                 only_det=False,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 model_name=None, **kwargs):
        super(VectorMapNet, self).__init__()

        
        # attributes
        self.model_name = model_name
        self.last_epoch = None
        self.only_det = only_det
  
        self.backbone = build_backbone(backbone_cfg)

        if neck_cfg is not None:
            # multi-scale neck: a configurable backbone whose first conv is
            # replaced so it accepts the BEV embedding channels, followed by
            # a projection neck
            self.neck_neck = build_backbone(neck_cfg.backbone)
            self.neck_neck.conv1 = nn.Conv2d(
                neck_input_channels, 64, kernel_size=7, stride=2, padding=3,
                bias=False)
            self.neck_project = build_neck(neck_cfg.neck)
            self.neck = self.multiscale_neck
        else:
            # default single-scale neck: ResNet-18 stem + layer1 on the BEV
            # embedding, projected to 128 channels
            trunk = resnet18(pretrained=False, zero_init_residual=True)
            self.neck = nn.Sequential(
                nn.Conv2d(neck_input_channels, 64, kernel_size=(7, 7),
                          stride=(2, 2), padding=(3, 3), bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1,
                             dilation=1, ceil_mode=False),
                trunk.layer1,
                nn.Conv2d(64, 128, kernel_size=1, bias=False),
            )
        
        # cache the BEV grid size if the backbone exposes it
        if hasattr(self.backbone, 'bev_w'):
            self.bev_w = self.backbone.bev_w
            self.bev_h = self.backbone.bev_h


        self.head = build_head(head_cfg)

    def multiscale_neck(self, bev_embedding):
        multi_feat = self.neck_neck(bev_embedding)
        multi_feat = self.neck_project(multi_feat)
        return multi_feat

    def forward_train(self, img, polys, points=None, img_metas=None, **kwargs):
        '''
        Args:
            img: torch.Tensor of shape [B, N, 3, H, W]
                N: number of cams
            polys: list[dict] of per-sample vectorized map annotations;
                len(polys) = batch_size. See format_det / format_gen for
                the expected keys.
            points: optional list of per-sample point clouds
            img_metas:
                img_metas['lidar2img']: [B, N, 4, 4]
        Out:
            loss, log_vars, num_sample
        '''
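        # Rough sketch of the batch built by batch_data below (shapes are
        # illustrative, not guaranteed):
        #   batch['det']: per-sample lists, e.g.
        #       'class_label': [Tensor(M_b,), ...], 'bbox': [Tensor(M_b, ...), ...]
        #   batch['gen']: flattened over all lines in the batch, e.g.
        #       'lines_cls' / 'lines_bs_idx': Tensor(L,), 'bbox_flat': Tensor(L, ...),
        #       'polylines' / 'polyline_masks' / 'polyline_weights': padded to
        #       [L, max_len, ...]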
        #  prepare labels and images
        batch, img, img_metas, valid_idx, points = self.batch_data(
            polys, img, img_metas, img.device, points)
        
        # Corner case: if this batch has no valid samples, fall back to the
        # most recent valid batch so training does not fail.
        if self.last_epoch is None:
            self.last_epoch = [batch, img, img_metas, valid_idx, points]

        if len(valid_idx) == 0:
            batch, img, img_metas, valid_idx, points = self.last_epoch
        else:
            self.last_epoch = [batch, img, img_metas, valid_idx, points]

        # Backbone
        _bev_feats = self.backbone(img, img_metas=img_metas, points=points)
        img_shape = [_bev_feats.shape[2:] for _ in range(_bev_feats.shape[0])]

        # Neck
        bev_feats = self.neck(_bev_feats)
        
        preds_dict, losses_dict = self.head(
            batch,
            context={
                'bev_embeddings': bev_feats,
                'batch_input_shape': _bev_feats.shape[2:],
                'img_shape': img_shape,
                'raw_bev_embeddings': _bev_feats,
            },
            only_det=self.only_det)

        # format outputs: sum all loss terms
        loss = sum(losses_dict.values())

        # update the log
        log_vars = {k: v.item() for k, v in losses_dict.items()}
        log_vars.update({'total': loss.item()})

        num_sample = img.size(0)

        return loss, log_vars, num_sample

    @torch.no_grad()
    def forward_test(self, img, polys=None, points=None, img_metas=None, **kwargs):
        '''
            inference pipeline
        '''

        # collect sample tokens for post-processing
        token = [img_meta['token'] for img_meta in img_metas]

        _bev_feats = self.backbone(img, img_metas, points=points)
        img_shape = [_bev_feats.shape[2:] for _ in range(_bev_feats.shape[0])]
        # Neck
        bev_feats = self.neck(_bev_feats)

        context = {'bev_embeddings': bev_feats,
                   'batch_input_shape': _bev_feats.shape[2:], 
                   'img_shape': img_shape, # XXX
                   'raw_bev_embeddings': _bev_feats}

        preds_dict = self.head(batch={},
                               context=context,
                               condition_on_det=True, 
                               gt_condition=False,
                               only_det=self.only_det)

        # the head returns None when there is nothing to decode
        if preds_dict is None:
            return [None]

        results_list = self.head.post_process(preds_dict, token, only_det=self.only_det)

        return results_list

    def batch_data(self, polys, imgs, img_metas, device, points=None):
        # drop samples that contain no vectors
        valid_idx = [i for i in range(len(polys)) if len(polys[i])]

        # bail out early if the whole batch is empty (handled by the caller)
        if len(valid_idx) == 0:
            return None, None, None, valid_idx, None

        imgs = imgs[valid_idx]
        img_metas = [img_metas[i] for i in valid_idx]
        polys = [polys[i] for i in valid_idx]

        if points is not None:
            points = [points[i] for i in valid_idx]
            points = self.batch_points(points)

        batch = {}
        batch['det'] = format_det(polys, device)
        batch['gen'] = format_gen(polys, device)

        return batch, imgs, img_metas, valid_idx, points

    def batch_points(self, points):
        # pad variable-length point clouds to a common length and build a
        # boolean mask marking the valid (non-padded) entries
        pad_points = pad_sequence(points, batch_first=True)

        points_mask = torch.zeros_like(pad_points[:, :, 0]).bool()
        for i in range(len(points)):
            valid_num = points[i].shape[0]
            points_mask[i][:valid_num] = True

        return (pad_points, points_mask)


def format_det(polys, device):
    # gather per-sample detection targets: element class labels and their
    # keypoint representations
    batch = {
        'class_label': [],
        'batch_idx': [],
        'bbox': [],
    }

    for poly in polys:
        keypoint_label = torch.from_numpy(poly['det_label']).to(device)
        keypoint = torch.from_numpy(poly['keypoint']).to(device)

        batch['class_label'].append(keypoint_label)
        batch['bbox'].append(keypoint)

    return batch
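
# Note: 'batch_idx' above is initialized but not populated; each element's
# sample index is implied by its position in the per-sample lists, e.g.
#   batch['class_label'] = [Tensor(M_0,), ..., Tensor(M_{B-1},)]
#   batch['bbox']        = [Tensor(M_0, ...), ..., Tensor(M_{B-1}, ...)]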
        
 
def format_gen(polys, device):

    polylines, polyline_masks, polyline_weights = [], [], []
    bbox, line_cls, line_bs_idx = [], [], []
    
    for batch_idx, poly in enumerate(polys):

        # move numpy arrays (or lists of arrays) onto the target device
        for k in poly.keys():
            if isinstance(poly[k], np.ndarray):
                poly[k] = torch.from_numpy(poly[k]).to(device)
            else:
                poly[k] = [torch.from_numpy(v).to(device) for v in poly[k]]

        line_cls += poly['gen_label']
        line_bs_idx += [batch_idx] * len(poly['gen_label'])

        # detection keypoints that condition polyline generation
        bbox += poly['qkeypoint']

        # generation targets
        polylines += poly['polylines']
        polyline_masks += poly['polyline_masks']
        polyline_weights += poly['polyline_weights']

    batch = {}
    batch['lines_bs_idx'] = torch.tensor(
        line_bs_idx, dtype=torch.long, device=device)
    batch['lines_cls'] = torch.tensor(
        line_cls, dtype=torch.long, device=device)
    batch['bbox_flat'] = torch.stack(bbox, 0)

    # padding
    batch['polylines'] = pad_sequence(polylines, batch_first=True)
    batch['polyline_masks'] = pad_sequence(polyline_masks, batch_first=True)
    batch['polyline_weights'] = pad_sequence(polyline_weights, batch_first=True)
    
    return batch
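

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the config contents below are
# assumptions, not this project's actual configs):
#
#   model_cfg = dict(
#       type='VectorMapNet',
#       backbone_cfg=dict(...),   # BEV feature extractor
#       neck_cfg=None,            # fall back to the ResNet-18 stem neck
#       head_cfg=dict(...),       # detection + polyline-generation head
#   )
#   mapper = MAPPERS.build(model_cfg)
#   loss, log_vars, num_sample = mapper.forward_train(
#       img, polys, points=points, img_metas=img_metas)
# ---------------------------------------------------------------------------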