import mmcv
import numpy as np
import tempfile
import warnings

from os import path as osp
from torch.utils.data import Dataset

from mmdet.datasets import DATASETS
from .pipelines import Compose
from .utils import get_loading_pipeline


@DATASETS.register_module()
class Custom3DSegDataset(Dataset):
    """Customized 3D dataset for semantic segmentation task.

    This is the base dataset of ScanNet and S3DIS dataset.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to None.
        classes (tuple[str], optional): Classes used in the dataset.
            Defaults to None.
        palette (list[list[int]], optional): The palette of segmentation map.
            Defaults to None.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to None.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
        ignore_index (int, optional): The label index to be ignored, e.g. \
            unannotated points. If None is given, set to len(self.CLASSES) to
            be consistent with PointSegClassMapping function in pipeline.
            Defaults to None.
        scene_idxs (np.ndarray | str, optional): Precomputed index to load
            data. For scenes with many points, we may sample it several times.
            Defaults to None.
        label_weight (np.ndarray | str, optional): Precomputed weight to \
            balance loss calculation. If None is given, use equal weighting.
            Defaults to None.
    """
    # names of all classes data used for the task
    CLASSES = None

    # class_ids used for training
    VALID_CLASS_IDS = None

    # all possible class_ids in loaded segmentation mask
    ALL_CLASS_IDS = None

    # official color for visualization
    PALETTE = None

    def __init__(self,
                 data_root,
                 ann_file,
                 pipeline=None,
                 classes=None,
                 palette=None,
                 modality=None,
                 test_mode=False,
                 ignore_index=None,
                 scene_idxs=None,
                 label_weight=None):
        super().__init__()
        self.data_root = data_root
        self.ann_file = ann_file
        self.test_mode = test_mode
        self.modality = modality

        self.data_infos = self.load_annotations(self.ann_file)

        # self.pipeline is only created when a pipeline config is given;
        # _get_pipeline() checks hasattr and falls back to a default otherwise
        if pipeline is not None:
            self.pipeline = Compose(pipeline)

        # default ignore_index to len(CLASSES) to stay consistent with
        # PointSegClassMapping in the data pipeline (see class docstring)
        self.ignore_index = len(self.CLASSES) if \
            ignore_index is None else ignore_index

        # NOTE: self.label_weight must be computed before
        # get_classes_and_palette(), which re-indexes self.label_weight
        # when custom classes are given
        self.scene_idxs, self.label_weight = \
            self.get_scene_idxs_and_label_weight(scene_idxs, label_weight)
        self.CLASSES, self.PALETTE = \
            self.get_classes_and_palette(classes, palette)

        # set group flag for the sampler
        if not self.test_mode:
            self._set_group_flag()

    def load_annotations(self, ann_file):
        """Read the annotation infos stored in ``ann_file``.

        Args:
            ann_file (str): Path of the annotation file.

        Returns:
            list[dict]: List of annotations.
        """
        annotations = mmcv.load(ann_file)
        return annotations

    def get_data_info(self, index):
        """Get data info according to the given index.

        Args:
            index (int): Index of the sample data to get.

        Returns:
            dict: Data information that will be passed to the data \
                preprocessing pipelines. It includes the following keys:

                - sample_idx (str): Sample index.
                - pts_filename (str): Filename of point clouds.
                - file_name (str): Filename of point clouds.
                - ann_info (dict): Annotation info.
        """
        info = self.data_infos[index]
        pts_path = osp.join(self.data_root, info['pts_path'])

        input_dict = dict(
            pts_filename=pts_path,
            sample_idx=info['point_cloud']['lidar_idx'],
            file_name=pts_path)

        # annotations are only attached for training / validation
        if self.test_mode:
            return input_dict
        input_dict['ann_info'] = self.get_ann_info(index)
        return input_dict

    def pre_pipeline(self, results):
        """Initialization before data preparation.

        Resets all field-collection lists that pipeline transforms append to.

        Args:
            results (dict): Dict before data preprocessing.

                - img_fields (list): Image fields.
                - pts_mask_fields (list): Mask fields of points.
                - pts_seg_fields (list): Mask fields of point segments.
                - mask_fields (list): Fields of masks.
                - seg_fields (list): Segment fields.
        """
        results['img_fields'] = []
        results['pts_mask_fields'] = []
        results['pts_seg_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []
        # NOTE(review): 'gt_bboxes_3d' is not a *_fields list like the keys
        # above, and a segmentation dataset has no box annotations -- confirm
        # whether any pipeline transform actually relies on this key.
        results['gt_bboxes_3d'] = []

    def prepare_train_data(self, index):
        """Training data preparation.

        Args:
            index (int): Index for accessing the target data.

        Returns:
            dict: Training data dict of the corresponding index.
        """
        raw_info = self.get_data_info(index)
        if raw_info is None:
            return None
        self.pre_pipeline(raw_info)
        return self.pipeline(raw_info)

    def prepare_test_data(self, index):
        """Prepare data for testing.

        Args:
            index (int): Index for accessing the target data.

        Returns:
            dict: Testing data dict of the corresponding index.
        """
        raw_info = self.get_data_info(index)
        self.pre_pipeline(raw_info)
        return self.pipeline(raw_info)

    def get_classes_and_palette(self, classes=None, palette=None):
        """Get class names of current dataset.

        This function is taken from MMSegmentation.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.
                Defaults to None.
            palette (Sequence[Sequence[int]] | np.ndarray | None):
                The palette of segmentation map. If None is given, random
                palette will be generated. Defaults to None.

        Returns:
            tuple: (class_names, palette) actually used by this dataset.
        """
        if classes is None:
            self.custom_classes = False
            # map id in the loaded mask to label used for training
            self.label_map = {
                cls_id: self.ignore_index
                for cls_id in self.ALL_CLASS_IDS
            }
            self.label_map.update(
                {cls_id: i
                 for i, cls_id in enumerate(self.VALID_CLASS_IDS)})
            # map label to category name
            self.label2cat = {
                i: cat_name
                for i, cat_name in enumerate(self.CLASSES)
            }
            return self.CLASSES, self.PALETTE

        self.custom_classes = True
        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        if self.CLASSES:
            if not set(class_names).issubset(self.CLASSES):
                raise ValueError('classes is not a subset of CLASSES.')

            # update valid_class_ids
            self.VALID_CLASS_IDS = [
                self.VALID_CLASS_IDS[self.CLASSES.index(cls_name)]
                for cls_name in class_names
            ]

            # dictionary, its keys are the old label ids and its values
            # are the new label ids.
            # used for changing pixel labels in load_annotations.
            self.label_map = {
                cls_id: self.ignore_index
                for cls_id in self.ALL_CLASS_IDS
            }
            self.label_map.update(
                {cls_id: i
                 for i, cls_id in enumerate(self.VALID_CLASS_IDS)})
            self.label2cat = {
                i: cat_name
                for i, cat_name in enumerate(class_names)
            }
            # NOTE(review): when self.CLASSES is falsy, label_map/label2cat
            # are not rebuilt for the custom classes -- confirm subclasses
            # always define CLASSES before custom classes are passed.

        # modify palette for visualization
        # NOTE(review): indexing assumes self.PALETTE/self.CLASSES are not
        # None here; a subclass without them would raise -- confirm.
        palette = [
            self.PALETTE[self.CLASSES.index(cls_name)]
            for cls_name in class_names
        ]

        # also need to modify self.label_weight
        # (relies on __init__ computing self.label_weight before this call)
        self.label_weight = np.array([
            self.label_weight[self.CLASSES.index(cls_name)]
            for cls_name in class_names
        ]).astype(np.float32)

        return class_names, palette

    def get_scene_idxs_and_label_weight(self, scene_idxs, label_weight):
        """Compute scene_idxs for data sampling and label weight for loss \
        calculation.

        We sample more times for scenes with more points. Label_weight is
        inversely proportional to number of class points.

        Args:
            scene_idxs (np.ndarray | str | None): Precomputed scene indices,
                a file path to load them from, or None to use each scene once.
            label_weight (np.ndarray | str | None): Precomputed class weights,
                a file path to load them from, or None for equal weighting.

        Returns:
            tuple[np.ndarray, np.ndarray]: scene_idxs as int32 and
                label_weight as float32.
        """
        if self.test_mode:
            # when testing, we load one whole scene every time
            # and we don't need label weight for loss calculation
            return np.arange(len(self.data_infos)).astype(np.int32), \
                np.ones(len(self.CLASSES)).astype(np.float32)

        # we may need to re-sample different scenes according to scene_idxs
        # this is necessary for indoor scene segmentation such as ScanNet
        if scene_idxs is None:
            scene_idxs = np.arange(len(self.data_infos))
        elif isinstance(scene_idxs, str):
            scene_idxs = np.load(scene_idxs)
        else:
            scene_idxs = np.array(scene_idxs)

        if label_weight is None:
            # we don't use label weighting in training
            label_weight = np.ones(len(self.CLASSES))
        elif isinstance(label_weight, str):
            label_weight = np.load(label_weight)
        else:
            label_weight = np.array(label_weight)

        return scene_idxs.astype(np.int32), label_weight.astype(np.float32)

    def format_results(self,
                       outputs,
                       pklfile_prefix=None,
                       submission_prefix=None):
        """Format the results to pkl file.

        Args:
            outputs (list[dict]): Testing results of the dataset.
            pklfile_prefix (str | None): The prefix of pkl files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            submission_prefix (str | None): Not used here; kept for interface
                compatibility with other datasets. Default: None.

        Returns:
            tuple: (outputs, tmp_dir), outputs is the detection results, \
                tmp_dir is the temporal directory created for saving pkl \
                files when ``pklfile_prefix`` is not specified.
        """
        if pklfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            pklfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            # caller supplied a prefix, so no temporary directory is needed
            tmp_dir = None
        # previously `out`/`tmp_dir` were only bound inside the None branch,
        # raising NameError whenever pklfile_prefix was provided
        out = f'{pklfile_prefix}.pkl'
        mmcv.dump(outputs, out)
        return outputs, tmp_dir

    def evaluate(self,
                 results,
                 metric=None,
                 logger=None,
                 show=False,
                 out_dir=None,
                 pipeline=None):
        """Evaluate.

        Evaluation in semantic segmentation protocol.

        Args:
            results (list[dict]): List of results.
            metric (str | list[str]): Metrics to be evaluated. Accepted for
                interface compatibility; segmentation metrics are always
                computed via ``seg_eval``.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Defaults to None.
            show (bool, optional): Whether to visualize.
                Defaults to False.
            out_dir (str, optional): Path to save the visualization results.
                Defaults to None.
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None.

        Returns:
            dict: Evaluation results.
        """
        # local import: seg_eval is only needed during evaluation
        from mmdet3d.core.evaluation import seg_eval
        assert isinstance(
            results, list), f'Expect results to be list, got {type(results)}.'
        assert len(results) > 0, 'Expect length of results > 0.'
        assert len(results) == len(self.data_infos)
        assert isinstance(
            results[0], dict
        ), f'Expect elements in results to be dict, got {type(results[0])}.'

        load_pipeline = self._get_pipeline(pipeline)
        pred_sem_masks = [result['semantic_mask'] for result in results]
        # reload ground-truth masks via the (annotation-enabled) pipeline
        gt_sem_masks = [
            self._extract_data(
                i, load_pipeline, 'pts_semantic_mask', load_annos=True)
            for i in range(len(self.data_infos))
        ]
        ret_dict = seg_eval(
            gt_sem_masks,
            pred_sem_masks,
            self.label2cat,
            self.ignore_index,
            logger=logger)

        if show:
            self.show(pred_sem_masks, out_dir, pipeline=pipeline)

        return ret_dict

    def _rand_another(self, idx):
        """Randomly get another item with the same flag.

        Returns:
            int: Another index of item with the same flag.
        """
        candidates = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(candidates)

    def _build_default_pipeline(self):
        """Build the default pipeline for this dataset."""
        msg = ('_build_default_pipeline is not implemented '
               f'for dataset {self.__class__.__name__}')
        raise NotImplementedError(msg)

    def _get_pipeline(self, pipeline):
        """Get data loading pipeline in self.show/evaluate function.

        Args:
            pipeline (list[dict] | None): Input pipeline. If None is given, \
                get from self.pipeline.
        """
        # an explicit pipeline config always wins
        if pipeline is not None:
            return Compose(pipeline)
        # no pipeline configured on the dataset: fall back to the default
        if getattr(self, 'pipeline', None) is None:
            warnings.warn(
                'Use default pipeline for data loading, this may cause '
                'errors when data is on ceph')
            return self._build_default_pipeline()
        # reuse only the loading transforms of the configured pipeline
        return Compose(get_loading_pipeline(self.pipeline.transforms))

    @staticmethod
    def _get_data(results, key):
        """Extract and return the data corresponding to key in result dict.

        Args:
            results (dict): Data loaded using pipeline.
            key (str): Key of the desired data.

        Returns:
            np.ndarray | torch.Tensor | None: Data term.
        """
        if key not in results:
            return None
        data = results[key]
        # results[key] may be data or list[data]: unwrap the container
        if isinstance(data, (list, tuple)):
            data = data[0]
        # data may be wrapped inside DataContainer
        if isinstance(data, mmcv.parallel.DataContainer):
            data = data._data
        return data

    def _extract_data(self, index, pipeline, key, load_annos=False):
        """Load data using input pipeline and extract data according to key.

        Args:
            index (int): Index for accessing the target data.
            pipeline (:obj:`Compose`): Composed data loading pipeline.
            key (str | list[str]): One single or a list of data key.
            load_annos (bool): Whether to load data annotations.
                If True, need to set self.test_mode as False before loading.

        Returns:
            np.ndarray | torch.Tensor | list[np.ndarray | torch.Tensor]:
                A single or a list of loaded data.
        """
        assert pipeline is not None, 'data loading pipeline is not provided'
        # when we want to load ground-truth via pipeline (e.g. bbox, seg mask)
        # we need to set self.test_mode as False so that we have 'annos'
        if load_annos:
            original_test_mode = self.test_mode
            self.test_mode = False
        # restore test_mode even if the pipeline raises; otherwise the
        # dataset would be left permanently flipped to training mode
        try:
            input_dict = self.get_data_info(index)
            self.pre_pipeline(input_dict)
            example = pipeline(input_dict)

            # extract data items according to keys
            if isinstance(key, str):
                data = self._get_data(example, key)
            else:
                data = [self._get_data(example, k) for k in key]
        finally:
            if load_annos:
                self.test_mode = original_test_mode

        return data

    def __len__(self):
        """Return the length of scene_idxs.

        Returns:
            int: Length of data infos.
        """
        num_samples = len(self.scene_idxs)
        return num_samples

    def __getitem__(self, idx):
        """Get item from infos according to the given index.

        In indoor scene segmentation task, each scene contains millions of
        points. However, we only sample less than 10k points within a patch
        each time. Therefore, we use `scene_idxs` to re-sample different rooms.

        Args:
            idx (int): Index into ``self.scene_idxs``.

        Returns:
            dict: Data dictionary of the corresponding index.
        """
        scene_idx = self.scene_idxs[idx]  # map to scene idx
        if self.test_mode:
            return self.prepare_test_data(scene_idx)
        # training: retry with a random same-flag sample until one succeeds
        while True:
            data = self.prepare_train_data(scene_idx)
            if data is None:
                idx = self._rand_another(idx)
                scene_idx = self.scene_idxs[idx]  # map to scene idx
                continue
            return data

    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0. In 3D datasets, they are all the same, thus are all
        zeros.
        """
        num_samples = len(self)
        # single group: 3D samples have no aspect-ratio distinction
        self.flag = np.zeros(num_samples, dtype=np.uint8)