"template/codellama-70b-instruct.json" did not exist on "23ebbaa46ead40c44c20b707b0e53d954ea51dc5"
base.py 6.48 KB
Newer Older
dingchang's avatar
dingchang committed
1
# Copyright (c) OpenMMLab. All rights reserved.
2
3
from abc import ABCMeta, abstractmethod
from typing import List, Tuple
4

5
6
7
from mmengine.data import PixelData
from mmengine.model import BaseModel
from torch import Tensor
8

zhangshilong's avatar
zhangshilong committed
9
10
11
12
from mmdet3d.structures import Det3DDataSample
from mmdet3d.structures.det3d_data_sample import (ForwardResults,
                                                  OptSampleList, SampleList)
from mmdet3d.utils import OptConfigType, OptMultiConfig
13
14


15
class Base3DSegmentor(BaseModel, metaclass=ABCMeta):
    """Base class for 3D segmentors.

    Subclasses implement the abstract hooks (:meth:`extract_feat`,
    :meth:`encode_decode`, :meth:`loss`, :meth:`predict`, :meth:`_forward`,
    :meth:`aug_test`); this base class provides the unified
    :meth:`forward` dispatch and result post-processing.

    Args:
        data_preprocessor (dict, optional): Model preprocessing config
            for processing the input data. It usually includes
            ``to_rgb``, ``pad_size_divisor``, ``pad_val``,
            ``mean`` and ``std``. Defaults to None.
        init_cfg (dict, optional): The config to control the
            initialization. Defaults to None.
    """

    def __init__(self,
                 data_preprocessor: OptConfigType = None,
                 init_cfg: OptMultiConfig = None) -> None:
        super(Base3DSegmentor, self).__init__(
            data_preprocessor=data_preprocessor, init_cfg=init_cfg)

    @property
    def with_neck(self) -> bool:
        """bool: Whether the segmentor has a neck."""
        return hasattr(self, 'neck') and self.neck is not None

    @property
    def with_auxiliary_head(self) -> bool:
        """bool: Whether the segmentor has an auxiliary head."""
        return hasattr(self,
                       'auxiliary_head') and self.auxiliary_head is not None

    @property
    def with_decode_head(self) -> bool:
        """bool: Whether the segmentor has a decode head."""
        return hasattr(self, 'decode_head') and self.decode_head is not None

    @property
    def with_regularization_loss(self) -> bool:
        """bool: Whether the segmentor has a regularization loss for
        weight."""
        return hasattr(self, 'loss_regularization') and \
            self.loss_regularization is not None

    @abstractmethod
    def extract_feat(self, batch_inputs: Tensor) -> Tensor:
        """Placeholder for extracting features from images.

        Note: the original annotation was ``-> bool``, which contradicts
        the method's purpose; concrete subclasses return feature tensors.
        """
        pass

    @abstractmethod
    def encode_decode(self, batch_inputs: Tensor,
                      batch_data_samples: SampleList):
        """Placeholder for encoding images with backbone and decoding into a
        semantic segmentation map of the same size as input."""
        pass

    def forward(self,
                batch_inputs_dict: dict,
                batch_data_samples: OptSampleList = None,
                mode: str = 'tensor') -> ForwardResults:
        """The unified entry for a forward process in both training and test.

        The method should accept three modes: "tensor", "predict" and "loss":

        - "tensor": Forward the whole network and return tensor or tuple of
        tensor without any post-processing, same as a common nn.Module.
        - "predict": Forward and return the predictions, which are fully
        processed to a list of :obj:`SegDataSample`.
        - "loss": Forward and return a dict of losses according to the given
        inputs and data samples.

        Note that this method doesn't handle neither back propagation nor
        optimizer updating, which are done in the :meth:`train_step`.

        Args:
            batch_inputs_dict (dict): Input sample dict which
                includes 'points' and 'imgs' keys.

                - points (list[torch.Tensor]): Point cloud of each sample.
                - imgs (torch.Tensor): Image tensor has shape (B, C, H, W).
            batch_data_samples (list[:obj:`Det3DDataSample`], optional):
                The annotation data of every samples. Defaults to None.
            mode (str): Return what kind of value. Defaults to 'tensor'.

        Returns:
            The return type depends on ``mode``.

            - If ``mode="tensor"``, return a tensor or a tuple of tensor.
            - If ``mode="predict"``, return a list of :obj:`Det3DDataSample`.
            - If ``mode="loss"``, return a dict of tensor.

        Raises:
            RuntimeError: If ``mode`` is not one of the three supported
                values.
        """
        if mode == 'loss':
            return self.loss(batch_inputs_dict, batch_data_samples)
        elif mode == 'predict':
            return self.predict(batch_inputs_dict, batch_data_samples)
        elif mode == 'tensor':
            return self._forward(batch_inputs_dict, batch_data_samples)
        else:
            raise RuntimeError(f'Invalid mode "{mode}". '
                               'Only supports loss, predict and tensor mode')

    @abstractmethod
    def loss(self, batch_inputs: Tensor,
             batch_data_samples: SampleList) -> dict:
        """Calculate losses from a batch of inputs and data samples."""
        pass

    @abstractmethod
    def predict(self, batch_inputs: Tensor,
                batch_data_samples: SampleList) -> SampleList:
        """Predict results from a batch of inputs and data samples with post-
        processing."""
        pass

    @abstractmethod
    def _forward(
            self,
            batch_inputs: Tensor,
            batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
        """Network forward process.

        Usually includes backbone, neck and head forward without any post-
        processing.
        """
        pass

    @abstractmethod
    def aug_test(self, batch_inputs, batch_img_metas):
        """Placeholder for augmentation test."""
        pass

    def postprocess_result(self, seg_logits_list: List[Tensor],
                           batch_img_metas: List[dict]) -> SampleList:
        """Convert results list to :obj:`Det3DDataSample`.

        Args:
            seg_logits_list (List[Tensor]): List of segmentation results,
                seg_logits from model of each input point clouds sample.
            batch_img_metas (List[dict]): Meta information of each sample;
                attached to the corresponding result as ``metainfo``.

        Returns:
            list[:obj:`Det3DDataSample`]: Segmentation results of the
            input images. Each Det3DDataSample usually contain:

            - ``pred_pts_sem_seg``(PixelData): Prediction of 3D
                semantic segmentation.
            - ``seg_logits``(PixelData): Predicted logits of semantic
                segmentation before normalization.
        """
        predictions = []

        for i in range(len(seg_logits_list)):
            img_meta = batch_img_metas[i]
            # BUG FIX: the original line read
            # ``seg_logits = seg_logits_list[i][None],`` — the trailing comma
            # built a 1-tuple (so ``.argmax`` raised AttributeError), and the
            # ``[None]`` prepended a singleton dim over which ``argmax(dim=0)``
            # would always return zeros. Use the tensor directly so argmax
            # runs over the class dimension.
            # NOTE(review): assumes seg_logits is (num_classes, num_points) —
            # confirm against concrete subclasses' predict() output.
            seg_logits = seg_logits_list[i]
            seg_pred = seg_logits.argmax(dim=0, keepdim=True)
            prediction = Det3DDataSample(**{'metainfo': img_meta})
            prediction.set_data(
                {'pred_pts_sem_seg': PixelData(**{'data': seg_pred})})
            predictions.append(prediction)
        return predictions