Unverified Commit dde4b02c authored by Wenwei Zhang's avatar Wenwei Zhang Committed by GitHub
Browse files

Fix all warnings in pt1.6 (#72)

* Fix (torch.nonzero): Fix warning of torch.nonzero and bug of nms_iou

* Fix contiguous warning of tensor

* Update CI with pt1.6

* update build action

* recover ci

* Add force cuda

* Use mmcv-full==1.0.5

* Fix nonzero warning

* Update CI config

* update links

* fix unit tests

* fix unit tests

* Lock pytorch version in CI

* Lock pytorch version in CI

* Lock pytorch version in CI

* Try specify torchlink

* Try specify torchlink

* Only add 3.7 for CI

* fix syntax error

* Use 1.3.0

* Use 1.3.0 and specify mmcv

* give up 1.3

* fix cublas issue

* fix cublas issue
parent 6356cbdc
......@@ -38,13 +38,19 @@ jobs:
strategy:
matrix:
python-version: [3.6, 3.7]
torch: [1.3.0, 1.5.0]
torch: [1.3.0, 1.5.0+cu101, 1.6.0+cu101]
include:
- torch: 1.3.0
torchvision: 0.4.2
mmcv: 1.3.0+cu101
cuda_arch: "6.0"
- torch: 1.5.0
torchvision: 0.6.0
- torch: 1.5.0+cu101
torchvision: 0.6.0+cu101
mmcv: 1.5.0+cu101
cuda_arch: "7.0"
- torch: 1.6.0+cu101
mmcv: 1.6.0+cu101
torchvision: 0.7.0+cu101
cuda_arch: "7.0"
steps:
......@@ -63,6 +69,8 @@ jobs:
sudo apt update -qq
sudo apt install -y cuda-${CUDA_SHORT/./-} cuda-cufft-dev-${CUDA_SHORT/./-}
sudo apt clean
sudo cp /usr/local/cuda-10.2/include/* /usr/local/cuda/include
sudo cp -r /usr/local/cuda-10.2/lib64/* /usr/local/cuda/lib64/
export CUDA_HOME=/usr/local/cuda-${CUDA_SHORT}
export LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${CUDA_HOME}/include:${LD_LIBRARY_PATH}
export PATH=${CUDA_HOME}/bin:${PATH}
......@@ -74,7 +82,7 @@ jobs:
run: pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install mmdet3d dependencies
run: |
pip install mmcv-full==latest+torch${{matrix.torch}}+cu101 -f https://openmmlab.oss-accelerate.aliyuncs.com/mmcv/dist/index.html
pip install mmcv-full==latest+torch${{matrix.mmcv}} -f https://openmmlab.oss-accelerate.aliyuncs.com/mmcv/dist/index.html
pip install -q git+https://github.com/open-mmlab/mmdetection.git
pip install -r requirements.txt
- name: Build and install
......
......@@ -9,7 +9,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
uses: actions/setup-python@v1
uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Install torch
......
......@@ -14,7 +14,7 @@ Documentation: https://mmdetection3d.readthedocs.io/
## Introduction
The master branch works with **PyTorch 1.3 to 1.5**.
The master branch works with **PyTorch 1.3 to 1.6**.
MMDetection3D is an open source object detection toolbox based on PyTorch, towards the next-generation platform for general 3D detection. It is
a part of the OpenMMLab project developed by [MMLab](http://mmlab.ie.cuhk.edu.hk/).
......
......@@ -249,6 +249,7 @@ total_epochs = 40
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = None
load_from = './pretrain_mmdet/mvx_faster_rcnn_detectron2-caffe_20e_coco-pretrain_gt-sample_kitti-3-class_moderate-79.3_20200207-a4a6a3c7.pth' # noqa
# You may need to download the model first if the network is unstable
load_from = 'https://openmmlab.oss-accelerate.aliyuncs.com/mmdetection3d/pretrain_models/mvx_faster_rcnn_detectron2-caffe_20e_coco-pretrain_gt-sample_kitti-3-class_moderate-79.3_20200207-a4a6a3c7.pth' # noqa
resume_from = None
workflow = [('train', 1)]
......@@ -114,22 +114,22 @@ class PartialBinBasedBBoxCoder(BaseBBoxCoder):
# decode objectness score
end += 2
results['obj_scores'] = preds_trans[..., start:end]
results['obj_scores'] = preds_trans[..., start:end].contiguous()
start = end
# decode center
end += 3
# (batch_size, num_proposal, 3)
results['center'] = base_xyz + preds_trans[..., start:end]
results['center'] = base_xyz + preds_trans[..., start:end].contiguous()
start = end
# decode direction
end += self.num_dir_bins
results['dir_class'] = preds_trans[..., start:end]
results['dir_class'] = preds_trans[..., start:end].contiguous()
start = end
end += self.num_dir_bins
dir_res_norm = preds_trans[..., start:end]
dir_res_norm = preds_trans[..., start:end].contiguous()
start = end
results['dir_res_norm'] = dir_res_norm
......@@ -137,7 +137,7 @@ class PartialBinBasedBBoxCoder(BaseBBoxCoder):
# decode size
end += self.num_sizes
results['size_class'] = preds_trans[..., start:end]
results['size_class'] = preds_trans[..., start:end].contiguous()
start = end
end += self.num_sizes * 3
......@@ -147,13 +147,13 @@ class PartialBinBasedBBoxCoder(BaseBBoxCoder):
[batch_size, num_proposal, self.num_sizes, 3])
start = end
results['size_res_norm'] = size_res_norm
results['size_res_norm'] = size_res_norm.contiguous()
mean_sizes = preds.new_tensor(self.mean_sizes)
results['size_res'] = (
size_res_norm * mean_sizes.unsqueeze(0).unsqueeze(0))
# decode semantic score
results['sem_scores'] = preds_trans[..., start:]
results['sem_scores'] = preds_trans[..., start:].contiguous()
return results
......
......@@ -157,7 +157,8 @@ class VoteHead(nn.Module):
torch.randint(0, num_seed, (batch_size, self.num_proposal)),
dtype=torch.int32)
else:
raise NotImplementedError
raise NotImplementedError(
f'Sample mode {sample_mod} is not supported!')
vote_aggregation_ret = self.vote_aggregation(vote_points,
vote_features,
......@@ -261,7 +262,7 @@ class VoteHead(nn.Module):
(batch_size, proposal_num, self.num_sizes))
one_hot_size_targets.scatter_(2, size_class_targets.unsqueeze(-1), 1)
one_hot_size_targets_expand = one_hot_size_targets.unsqueeze(
-1).repeat(1, 1, 1, 3)
-1).repeat(1, 1, 1, 3).contiguous()
size_residual_norm = torch.sum(
bbox_preds['size_res_norm'] * one_hot_size_targets_expand, 2)
box_loss_weights_expand = box_loss_weights.unsqueeze(-1).repeat(
......
......@@ -88,13 +88,13 @@ class VoteModule(nn.Module):
votes = votes.transpose(2, 1).view(batch_size, num_seed,
self.vote_per_seed, -1)
offset = votes[:, :, :, 0:3]
res_feats = votes[:, :, :, 3:]
offset = votes[:, :, :, 0:3].contiguous()
res_feats = votes[:, :, :, 3:].contiguous()
vote_points = (seed_points.unsqueeze(2) + offset).contiguous()
vote_points = seed_points.unsqueeze(2) + offset
vote_points = vote_points.view(batch_size, num_vote, 3)
vote_feats = (seed_feats.transpose(2, 1).unsqueeze(2) +
res_feats).contiguous()
vote_feats = seed_feats.permute(
0, 2, 1).unsqueeze(2).contiguous() + res_feats
vote_feats = vote_feats.view(batch_size, num_vote,
feat_channels).transpose(2,
1).contiguous()
......
......@@ -246,7 +246,7 @@ class PartA2BboxHead(nn.Module):
# transform to sparse tensors
sparse_shape = part_feats.shape[1:4]
# (non_empty_num, 4) ==> [bs_idx, x_idx, y_idx, z_idx]
sparse_idx = part_feats.sum(dim=-1).nonzero()
sparse_idx = part_feats.sum(dim=-1).nonzero(as_tuple=False)
part_features = part_feats[sparse_idx[:, 0], sparse_idx[:, 1],
sparse_idx[:, 2], sparse_idx[:, 3]]
......@@ -600,7 +600,8 @@ class PartA2BboxHead(nn.Module):
class_scores_keep = box_probs[:, k] >= score_thresh[k]
if class_scores_keep.int().sum() > 0:
original_idxs = class_scores_keep.nonzero().view(-1)
original_idxs = class_scores_keep.nonzero(
as_tuple=False).view(-1)
cur_boxes_for_nms = boxes_for_nms[class_scores_keep]
cur_rank_scores = box_probs[class_scores_keep, k]
......
......@@ -260,7 +260,8 @@ class PartAggregationROIHead(Base3DRoIHead):
# gather assign_results in different class into one result
batch_num_gts += cur_assign_res.num_gts
# gt inds (1-based)
gt_inds_arange_pad = gt_per_cls.nonzero().view(-1) + 1
gt_inds_arange_pad = gt_per_cls.nonzero(
as_tuple=False).view(-1) + 1
# pad 0 for indice unassigned
gt_inds_arange_pad = F.pad(
gt_inds_arange_pad, (1, 0), mode='constant', value=0)
......
......@@ -50,19 +50,22 @@ def test_chamfer_disrance():
assert torch.allclose(loss_source, torch.tensor(219.5936))
assert torch.allclose(loss_target, torch.tensor(22.3705))
assert (indices1 == indices1.new_tensor([[0, 4, 4, 4, 4, 2, 4, 4, 4, 3],
[0, 1, 0, 1, 0, 4, 2, 0, 0,
1]])).all()
assert (indices2 == indices2.new_tensor([[0, 0, 0, 0, 0], [0, 3, 6, 0,
0]])).all()
expected_inds1 = [[0, 4, 4, 4, 4, 2, 4, 4, 4, 3],
[0, 1, 0, 1, 0, 4, 2, 0, 0, 1]]
expected_inds2 = [[0, 4, 4, 4, 4, 2, 4, 4, 4, 3],
[0, 1, 0, 1, 0, 3, 2, 0, 0, 1]]
assert (torch.equal(indices1, indices1.new_tensor(expected_inds1))
or torch.equal(indices1, indices1.new_tensor(expected_inds2)))
assert torch.equal(indices2,
indices2.new_tensor([[0, 0, 0, 0, 0], [0, 3, 6, 0, 0]]))
loss_source, loss_target, indices1, indices2 = chamfer_distance(
source, target, reduction='sum')
assert torch.allclose(loss_source, torch.tensor(219.5936))
assert torch.allclose(loss_target, torch.tensor(22.3705))
assert (indices1 == indices1.new_tensor([[0, 4, 4, 4, 4, 2, 4, 4, 4, 3],
[0, 1, 0, 1, 0, 4, 2, 0, 0,
1]])).all()
assert (torch.equal(indices1, indices1.new_tensor(expected_inds1))
or torch.equal(indices1, indices1.new_tensor(expected_inds2)))
assert (indices2 == indices2.new_tensor([[0, 0, 0, 0, 0], [0, 3, 6, 0,
0]])).all()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment