# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import warnings
from os import path as osp
from typing import Callable, List, Union

import numpy as np

from mmdet3d.core import instance_seg_eval, show_result, show_seg_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.registry import DATASETS
from .custom_3d_seg import Custom3DSegDataset
from .det3d_dataset import Det3DDataset
from .pipelines import Compose


@DATASETS.register_module()
class ScanNetDataset(Det3DDataset):
    r"""ScanNet Dataset for Detection Task.

    This class serves as the API for experiments on the ScanNet Dataset.
    Please refer to the `github repo `_ for data downloading.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        metainfo (dict, optional): Meta information for dataset, such as class
            information. Defaults to None.
        data_prefix (dict): Prefix for data. Defaults to
            `dict(pts='points', pts_instance_mask='instance_mask',
            pts_semantic_mask='semantic_mask')`.
        pipeline (list[dict]): Pipeline used for data processing.
            Defaults to None.
        modality (dict): Modality to specify the sensor data used as input.
            Defaults to None.
        box_type_3d (str): Type of 3D box of this dataset. Based on the
            `box_type_3d`, the dataset will encapsulate the box to its
            original format then converted them to `box_type_3d`.
            Defaults to 'Depth' in this dataset. Available options includes

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        filter_empty_gt (bool): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool): Whether the dataset is in test mode.
            Defaults to False.
    """
    METAINFO = {
        'CLASSES':
        ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
         'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
         'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin')
    }

    def __init__(self,
                 data_root: str,
                 ann_file: str,
                 metainfo: dict = None,
                 # NOTE: key fixed from the misspelled 'pts_isntance_mask' so
                 # that parse_data_info's lookup of 'pts_instance_mask'
                 # actually picks up the configured prefix.
                 data_prefix: dict = dict(
                     pts='points',
                     pts_instance_mask='instance_mask',
                     pts_semantic_mask='semantic_mask'),
                 pipeline: List[Union[dict, Callable]] = [],
                 modality=dict(use_camera=False, use_lidar=True),
                 box_type_3d: str = 'Depth',
                 filter_empty_gt: bool = True,
                 test_mode: bool = False,
                 **kwargs):
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            metainfo=metainfo,
            data_prefix=data_prefix,
            pipeline=pipeline,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode,
            **kwargs)

        # At least one sensor modality must be enabled for this dataset.
        assert 'use_camera' in self.modality and \
            'use_lidar' in self.modality
        assert self.modality['use_camera'] or self.modality['use_lidar']

    @staticmethod
    def _get_axis_align_matrix(info: dict) -> dict:
        """Get axis_align_matrix from info. If not exist, return identity mat.

        Args:
            info (dict): Info of a single sample data.

        Returns:
            np.ndarray: 4x4 transformation matrix.
        """
        if 'axis_align_matrix' in info:
            return np.array(info['axis_align_matrix'])
        else:
            warnings.warn(
                'axis_align_matrix is not found in ScanNet data info, please '
                'use new pre-process scripts to re-generate ScanNet data')
            return np.eye(4).astype(np.float32)

    def parse_data_info(self, info: dict) -> dict:
        """Process the raw data info.

        The only difference with it in `Det3DDataset`
        is the specific process for `axis_align_matrix'.

        Args:
            info (dict): Raw info dict.

        Returns:
            dict: Data information that will be passed to the data
                preprocessing pipelines. It includes the following keys:
        """
        # TODO: whether all depth modality is pts ?
        if self.modality['use_lidar']:
            info['lidar_points']['lidar_path'] = \
                osp.join(
                    self.data_prefix.get('pts', ''),
                    info['lidar_points']['lidar_path'])

        info['axis_align_matrix'] = self._get_axis_align_matrix(info)
        info['pts_instance_mask_path'] = osp.join(
            self.data_prefix.get('pts_instance_mask', ''),
            info['pts_instance_mask_path'])
        info['pts_semantic_mask_path'] = osp.join(
            self.data_prefix.get('pts_semantic_mask', ''),
            info['pts_semantic_mask_path'])

        info = super().parse_data_info(info)
        return info

    def parse_ann_info(self, info: dict) -> dict:
        """Process the `instances` in data info to `ann_info`

        Args:
            info (dict): Info dict.

        Returns:
            dict: Processed `ann_info`
        """
        ann_info = super().parse_ann_info(info)
        # to target box structure
        ann_info['gt_bboxes_3d'] = DepthInstance3DBoxes(
            ann_info['gt_bboxes_3d'],
            box_dim=ann_info['gt_bboxes_3d'].shape[-1],
            with_yaw=False,
            origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)

        return ann_info

    def _build_default_pipeline(self):
        """Build the default pipeline for this dataset."""
        pipeline = [
            dict(
                type='LoadPointsFromFile',
                coord_type='DEPTH',
                shift_height=False,
                load_dim=6,
                use_dim=[0, 1, 2]),
            dict(type='GlobalAlignment', rotation_axis=2),
            dict(
                type='DefaultFormatBundle3D',
                class_names=self.CLASSES,
                with_label=False),
            dict(type='Collect3D', keys=['points'])
        ]
        return Compose(pipeline)

    def show(self, results, out_dir, show=True, pipeline=None):
        """Results visualization.

        Args:
            results (list[dict]): List of bounding boxes results.
            out_dir (str): Output directory of visualization result.
            show (bool): Visualize the results online.
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None.
        """
        assert out_dir is not None, 'Expect out_dir, got none.'
        pipeline = self._get_pipeline(pipeline)
        for i, result in enumerate(results):
            # Bug fix: `get_data_info` is a method and must be called,
            # not subscripted (`self.get_data_info[i]` raised TypeError).
            data_info = self.get_data_info(i)
            pts_path = data_info['lidar_points']['lidar_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            points = self._extract_data(i, pipeline, 'points').numpy()
            gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name,
                        show)


@DATASETS.register_module()
class ScanNetSegDataset(Custom3DSegDataset):
    r"""ScanNet Dataset for Semantic Segmentation Task.

    This class serves as the API for experiments on the ScanNet Dataset.
    Please refer to the `github repo `_ for data downloading.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Path of annotation file.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to None.
        classes (tuple[str], optional): Classes used in the dataset.
            Defaults to None.
        palette (list[list[int]], optional): The palette of segmentation map.
            Defaults to None.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to None.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
        ignore_index (int, optional): The label index to be ignored, e.g.
            unannotated points. If None is given, set to len(self.CLASSES).
            Defaults to None.
        scene_idxs (np.ndarray | str, optional): Precomputed index to load
            data. For scenes with many points, we may sample it several times.
            Defaults to None.
    """
    CLASSES = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
               'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
               'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink',
               'bathtub', 'otherfurniture')

    # Raw ScanNet category ids that are kept for the 20-class benchmark.
    VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,
                       33, 34, 36, 39)

    ALL_CLASS_IDS = tuple(range(41))

    PALETTE = [
        [174, 199, 232],
        [152, 223, 138],
        [31, 119, 180],
        [255, 187, 120],
        [188, 189, 34],
        [140, 86, 75],
        [255, 152, 150],
        [214, 39, 40],
        [197, 176, 213],
        [148, 103, 189],
        [196, 156, 148],
        [23, 190, 207],
        [247, 182, 210],
        [219, 219, 141],
        [255, 127, 14],
        [158, 218, 229],
        [44, 160, 44],
        [112, 128, 144],
        [227, 119, 194],
        [82, 84, 163],
    ]

    def __init__(self,
                 data_root,
                 ann_file,
                 pipeline=None,
                 classes=None,
                 palette=None,
                 modality=None,
                 test_mode=False,
                 ignore_index=None,
                 scene_idxs=None,
                 **kwargs):
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            classes=classes,
            palette=palette,
            modality=modality,
            test_mode=test_mode,
            ignore_index=ignore_index,
            scene_idxs=scene_idxs,
            **kwargs)

    def get_ann_info(self, index):
        """Get annotation info according to the given index.

        Args:
            index (int): Index of the annotation data to get.

        Returns:
            dict: annotation information consists of the following keys:

                - pts_semantic_mask_path (str): Path of semantic masks.
        """
        # Use index to get the annos, thus the evalhook could also use this api
        info = self.data_infos[index]

        pts_semantic_mask_path = osp.join(self.data_root,
                                          info['pts_semantic_mask_path'])

        anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)
        return anns_results

    def _build_default_pipeline(self):
        """Build the default pipeline for this dataset."""
        pipeline = [
            dict(
                type='LoadPointsFromFile',
                coord_type='DEPTH',
                shift_height=False,
                use_color=True,
                load_dim=6,
                use_dim=[0, 1, 2, 3, 4, 5]),
            dict(
                type='LoadAnnotations3D',
                with_bbox_3d=False,
                with_label_3d=False,
                with_mask_3d=False,
                with_seg_3d=True),
            dict(
                type='PointSegClassMapping',
                valid_cat_ids=self.VALID_CLASS_IDS,
                max_cat_id=np.max(self.ALL_CLASS_IDS)),
            dict(
                type='DefaultFormatBundle3D',
                with_label=False,
                class_names=self.CLASSES),
            dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
        ]
        return Compose(pipeline)

    def show(self, results, out_dir, show=True, pipeline=None):
        """Results visualization.

        Args:
            results (list[dict]): List of bounding boxes results.
            out_dir (str): Output directory of visualization result.
            show (bool): Visualize the results online.
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None.
        """
        assert out_dir is not None, 'Expect out_dir, got none.'
        pipeline = self._get_pipeline(pipeline)
        for i, result in enumerate(results):
            data_info = self.data_infos[i]
            pts_path = data_info['pts_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            points, gt_sem_mask = self._extract_data(
                i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)
            points = points.numpy()
            pred_sem_mask = result['semantic_mask'].numpy()
            show_seg_result(points, gt_sem_mask,
                            pred_sem_mask, out_dir, file_name,
                            np.array(self.PALETTE), self.ignore_index, show)

    def get_scene_idxs(self, scene_idxs):
        """Compute scene_idxs for data sampling.

        We sample more times for scenes with more points.
        """
        # when testing, we load one whole scene every time
        if not self.test_mode and scene_idxs is None:
            raise NotImplementedError(
                'please provide re-sampled scene indexes for training')

        return super().get_scene_idxs(scene_idxs)

    def format_results(self, results, txtfile_prefix=None):
        r"""Format the results to txt file. Refer to `ScanNet documentation
        `_.

        Args:
            outputs (list[dict]): Testing results of the dataset.
            txtfile_prefix (str): The prefix of saved files. It includes the
                file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (outputs, tmp_dir), outputs is the detection results,
                tmp_dir is the temporal directory created for saving submission
                files when ``submission_prefix`` is not specified.
        """
        import mmcv

        if txtfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        mmcv.mkdir_or_exist(txtfile_prefix)

        # need to map network output to original label idx
        # NOTE: np.int was a deprecated alias removed in NumPy 1.24;
        # use the explicit fixed-width dtype instead.
        pred2label = np.zeros(len(self.VALID_CLASS_IDS)).astype(np.int64)
        for original_label, output_idx in self.label_map.items():
            if output_idx != self.ignore_index:
                pred2label[output_idx] = original_label

        outputs = []
        for i, result in enumerate(results):
            info = self.data_infos[i]
            sample_idx = info['point_cloud']['lidar_idx']
            pred_sem_mask = result['semantic_mask'].numpy().astype(np.int64)
            pred_label = pred2label[pred_sem_mask]
            curr_file = f'{txtfile_prefix}/{sample_idx}.txt'
            np.savetxt(curr_file, pred_label, fmt='%d')
            outputs.append(dict(seg_mask=pred_label))

        return outputs, tmp_dir


@DATASETS.register_module()
class ScanNetInstanceSegDataset(Custom3DSegDataset):
    CLASSES = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
               'bookshelf', 'picture', 'counter', 'desk', 'curtain',
               'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
               'garbagebin')

    # Raw ScanNet category ids of the 18 instance classes (wall/floor absent).
    VALID_CLASS_IDS = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33,
                       34, 36, 39)

    ALL_CLASS_IDS = tuple(range(41))

    def get_ann_info(self, index):
        """Get annotation info according to the given index.

        Args:
            index (int): Index of the annotation data to get.

        Returns:
            dict: annotation information consists of the following keys:

                - pts_semantic_mask_path (str): Path of semantic masks.
                - pts_instance_mask_path (str): Path of instance masks.
        """
        # Use index to get the annos, thus the evalhook could also use this api
        info = self.data_infos[index]

        pts_instance_mask_path = osp.join(self.data_root,
                                          info['pts_instance_mask_path'])
        pts_semantic_mask_path = osp.join(self.data_root,
                                          info['pts_semantic_mask_path'])

        anns_results = dict(
            pts_instance_mask_path=pts_instance_mask_path,
            pts_semantic_mask_path=pts_semantic_mask_path)
        return anns_results

    def get_classes_and_palette(self, classes=None, palette=None):
        """Get class names of current dataset.

        Palette is simply ignored for instance segmentation.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.
                Defaults to None.
            palette (Sequence[Sequence[int]]] | np.ndarray | None):
                The palette of segmentation map. If None is given, random
                palette will be generated. Defaults to None.
        """
        if classes is not None:
            return classes, None
        return self.CLASSES, None

    def _build_default_pipeline(self):
        """Build the default pipeline for this dataset."""
        pipeline = [
            dict(
                type='LoadPointsFromFile',
                coord_type='DEPTH',
                shift_height=False,
                use_color=True,
                load_dim=6,
                use_dim=[0, 1, 2, 3, 4, 5]),
            dict(
                type='LoadAnnotations3D',
                with_bbox_3d=False,
                with_label_3d=False,
                with_mask_3d=True,
                with_seg_3d=True),
            dict(
                type='PointSegClassMapping',
                valid_cat_ids=self.VALID_CLASS_IDS,
                max_cat_id=40),
            dict(
                type='DefaultFormatBundle3D',
                with_label=False,
                class_names=self.CLASSES),
            dict(
                type='Collect3D',
                keys=['points', 'pts_semantic_mask', 'pts_instance_mask'])
        ]
        return Compose(pipeline)

    def evaluate(self,
                 results,
                 metric=None,
                 options=None,
                 logger=None,
                 show=False,
                 out_dir=None,
                 pipeline=None):
        """Evaluation in instance segmentation protocol.

        Args:
            results (list[dict]): List of results.
            metric (str | list[str]): Metrics to be evaluated.
            options (dict, optional): options for instance_seg_eval.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Defaults to None.
            show (bool, optional): Whether to visualize.
                Defaults to False.
            out_dir (str, optional): Path to save the visualization results.
                Defaults to None.
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None.

        Returns:
            dict: Evaluation results.
        """
        assert isinstance(
            results, list), f'Expect results to be list, got {type(results)}.'
        assert len(results) > 0, 'Expect length of results > 0.'
        assert len(results) == len(self.data_infos)
        assert isinstance(
            results[0], dict
        ), f'Expect elements in results to be dict, got {type(results[0])}.'

        load_pipeline = self._get_pipeline(pipeline)
        pred_instance_masks = [result['instance_mask'] for result in results]
        pred_instance_labels = [result['instance_label'] for result in results]
        pred_instance_scores = [result['instance_score'] for result in results]
        gt_semantic_masks, gt_instance_masks = zip(*[
            self._extract_data(
                index=i,
                pipeline=load_pipeline,
                key=['pts_semantic_mask', 'pts_instance_mask'],
                load_annos=True) for i in range(len(self.data_infos))
        ])
        ret_dict = instance_seg_eval(
            gt_semantic_masks,
            gt_instance_masks,
            pred_instance_masks,
            pred_instance_labels,
            pred_instance_scores,
            valid_class_ids=self.VALID_CLASS_IDS,
            class_labels=self.CLASSES,
            options=options,
            logger=logger)

        if show:
            raise NotImplementedError('show is not implemented for now')

        return ret_dict