Commit 19bf91e3 authored by Shaoshuai Shi

refactor/rename some parts of the code

parent 5666ea67
@@ -2,7 +2,7 @@ import torch
import torch.nn as nn
import spconv.pytorch as spconv
from pcdet.ops.roiaware_pool3d.roiaware_pool3d_utils import points_in_boxes_gpu
from pcdet.models.backbones_3d.focal_sparse_conv.utils import split_voxels, check_repeat, FocalLoss
from pcdet.models.backbones_3d.focal_sparse_conv.focal_sparse_utils import split_voxels, check_repeat, FocalLoss
from pcdet.utils import common_utils
@@ -221,5 +221,4 @@ class FocalSparseConv(spconv.SparseModule):
        out = out.replace_feature(self.bn1(out.features))
        out = out.replace_feature(self.relu(out.features))
        batch_dict['loss_box_of_pts'] += loss_box_of_pts
        return out, batch_dict
        return out, batch_dict, loss_box_of_pts
@@ -195,6 +195,15 @@ class VoxelBackBone8xFocal(nn.Module):
            'x_conv3': 64,
            'x_conv4': 64
        }

        self.forward_ret_dict = {}

    def get_loss(self, tb_dict=None):
        loss = self.forward_ret_dict['loss_box_of_pts']
        if tb_dict is None:
            tb_dict = {}
        tb_dict['loss_box_of_pts'] = loss.item()
        return loss, tb_dict

    def forward(self, batch_dict):
        """
@@ -221,9 +230,12 @@ class VoxelBackBone8xFocal(nn.Module):
        x = self.conv_input(input_sp_tensor)
        x_conv1, batch_dict = self.conv1(x, batch_dict)

        loss_box_of_pts = 0
        if self.use_img:
            x_image = self.semseg(batch_dict['images'])['layer1_feat2d']
            x_conv1, batch_dict = self.conv_focal_multimodal(x_conv1, batch_dict, x_image)
            x_conv1, batch_dict, loss_box_of_pts = self.conv_focal_multimodal(x_conv1, batch_dict, x_image)
        self.forward_ret_dict['loss_box_of_pts'] = loss_box_of_pts

        x_conv2, batch_dict = self.conv2(x_conv1, batch_dict)
        x_conv3, batch_dict = self.conv3(x_conv2, batch_dict)
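For reference, a minimal usage sketch (not part of this commit) of how the refactored loss travels after these changes, assuming `model` and `batch_dict` come from a standard OpenPCDet training setup with VoxelBackBone8xFocal and the multimodal branch enabled (use_img=True): forward() stashes loss_box_of_pts in forward_ret_dict, and get_loss() later hands it to the detector instead of the detector reading batch_dict['loss_box_of_pts'].

# Hypothetical sketch; `model` and `batch_dict` are assumptions, not defined in this commit.
batch_dict = model.backbone_3d(batch_dict)                # forward() stores loss_box_of_pts in forward_ret_dict
loss_backbone3d, tb_dict = model.backbone_3d.get_loss()   # returns the stored loss and logs its scalar value
print(tb_dict['loss_box_of_pts'])                         # the value that ends up in TensorBoard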
@@ -12,9 +12,6 @@ class PVRCNN(Detector3DTemplate):
        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss()

            if 'loss_box_of_pts' in batch_dict:
                loss += batch_dict['loss_box_of_pts']
                tb_dict['loss_box_of_pts'] = batch_dict['loss_box_of_pts']

            ret_dict = {
                'loss': loss
@@ -31,4 +28,9 @@ class PVRCNN(Detector3DTemplate):
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn

        if hasattr(self.backbone_3d, 'get_loss'):
            loss_backbone3d, tb_dict = self.backbone_3d.get_loss(tb_dict)
            loss += loss_backbone3d

        return loss, tb_dict, disp_dict
@@ -13,10 +13,6 @@ class VoxelRCNN(Detector3DTemplate):
        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss()

            if 'loss_box_of_pts' in batch_dict:
                loss += batch_dict['loss_box_of_pts']
                tb_dict['loss_box_of_pts'] = batch_dict['loss_box_of_pts']

            ret_dict = {
                'loss': loss
            }
@@ -33,4 +29,9 @@ class VoxelRCNN(Detector3DTemplate):
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss + loss_rpn + loss_rcnn

        if hasattr(self.backbone_3d, 'get_loss'):
            loss_backbone3d, tb_dict = self.backbone_3d.get_loss(tb_dict)
            loss += loss_backbone3d

        return loss, tb_dict, disp_dict
import torch


def area(box) -> torch.Tensor:
    """
    Computes the area of all the boxes.

    Returns:
        torch.Tensor: a vector with areas of each box.
    """
    area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
    return area


# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1, boxes2) -> torch.Tensor:
    """
    Given two lists of boxes of size N and M,
    compute the IoU (intersection over union)
    between __all__ N x M pairs of boxes.
    The box order must be (xmin, ymin, xmax, ymax).

    Args:
        boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.

    Returns:
        Tensor: IoU, sized [N,M].
    """
    area1 = area(boxes1)
    area2 = area(boxes2)

    width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
        boxes1[:, None, :2], boxes2[:, :2]
    )  # [N,M,2]

    width_height.clamp_(min=0)  # [N,M,2]
    inter = width_height.prod(dim=2)  # [N,M]
    del width_height

    # handle empty boxes
    iou = torch.where(
        inter > 0,
        inter / (area1[:, None] + area2 - inter),
        torch.zeros(1, dtype=inter.dtype, device=inter.device),
    )
    return iou
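As a quick sanity check (not part of the commit), a small hand-computed example of the two helpers above, with boxes given in (xmin, ymin, xmax, ymax) order:

import torch

boxes_a = torch.tensor([[0., 0., 2., 2.],
                        [0., 0., 1., 1.]])
boxes_b = torch.tensor([[1., 1., 3., 3.]])

print(area(boxes_a))                   # tensor([4., 1.])
print(pairwise_iou(boxes_a, boxes_b))  # row 0: inter=1, union=4+4-1=7 -> ~0.1429; row 1: no overlap -> 0
# tensor([[0.1429],
#         [0.0000]])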
\ No newline at end of file
@@ -333,3 +333,49 @@ def boxes3d_nearest_bev_iou(boxes_a, boxes_b):
    boxes_bev_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b)
    return boxes_iou_normal(boxes_bev_a, boxes_bev_b)


def area(box) -> torch.Tensor:
    """
    Computes the area of all the boxes.

    Returns:
        torch.Tensor: a vector with areas of each box.
    """
    area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
    return area


# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1, boxes2) -> torch.Tensor:
    """
    Given two lists of boxes of size N and M,
    compute the IoU (intersection over union)
    between __all__ N x M pairs of boxes.
    The box order must be (xmin, ymin, xmax, ymax).

    Args:
        boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.

    Returns:
        Tensor: IoU, sized [N,M].
    """
    area1 = area(boxes1)
    area2 = area(boxes2)

    width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
        boxes1[:, None, :2], boxes2[:, :2]
    )  # [N,M,2]

    width_height.clamp_(min=0)  # [N,M,2]
    inter = width_height.prod(dim=2)  # [N,M]
    del width_height

    # handle empty boxes
    iou = torch.where(
        inter > 0,
        inter / (area1[:, None] + area2 - inter),
        torch.zeros(1, dtype=inter.dtype, device=inter.device),
    )
    return iou