Unverified Commit e70a318a authored by pc's avatar pc Committed by GitHub
Browse files

[Fix] Fix parrots unit tests (#1913)

parent a86e8071
......@@ -50,7 +50,8 @@ class CARAFENaiveFunction(Function):
group_size=group_size,
scale_factor=scale_factor)
if features.requires_grad or masks.requires_grad:
if features.requires_grad or masks.requires_grad or \
torch.__version__ == 'parrots':
ctx.save_for_backward(features, masks)
return output
......@@ -139,7 +140,8 @@ class CARAFEFunction(Function):
group_size=group_size,
scale_factor=scale_factor)
if features.requires_grad or masks.requires_grad:
if features.requires_grad or masks.requires_grad or \
torch.__version__ == 'parrots':
ctx.save_for_backward(features, masks, rfeatures)
return output
......
......@@ -1396,6 +1396,12 @@ int HardVoxelizeForwardCUDAKernelLauncher(
const std::vector<float> coors_range, const int max_points,
const int max_voxels, const int NDim = 3);
int NondeterministicHardVoxelizeForwardCUDAKernelLauncher(
const at::Tensor& points, at::Tensor& voxels, at::Tensor& coors,
at::Tensor& num_points_per_voxel, const std::vector<float> voxel_size,
const std::vector<float> coors_range, const int max_points,
const int max_voxels, const int NDim = 3);
void DynamicVoxelizeForwardCUDAKernelLauncher(
const at::Tensor& points, at::Tensor& coors,
const std::vector<float> voxel_size, const std::vector<float> coors_range,
......@@ -1413,6 +1419,16 @@ int hard_voxelize_forward_cuda(const at::Tensor& points, at::Tensor& voxels,
max_points, max_voxels, NDim);
};
// CUDA entry point for the nondeterministic variant of hard voxelization.
// Thin wrapper that forwards all arguments to the kernel launcher; the
// returned int is the launcher's result (stored into voxel_num by the
// caller, see hard_voxelize_forward).
//
// NOTE: points/voxels/coors/num_points_per_voxel are ATen tensors; voxel_size
// and coors_range are copied by value here — presumably cheap (3 floats each),
// but a const& would avoid the copy.
int nondeterministic_hard_voxelize_forward_cuda(
    const at::Tensor& points, at::Tensor& voxels, at::Tensor& coors,
    at::Tensor& num_points_per_voxel, const std::vector<float> voxel_size,
    const std::vector<float> coors_range, const int max_points,
    const int max_voxels, const int NDim) {
  return NondeterministicHardVoxelizeForwardCUDAKernelLauncher(
      points, voxels, coors, num_points_per_voxel, voxel_size, coors_range,
      max_points, max_voxels, NDim);
}  // removed redundant ';' after the function body
void dynamic_voxelize_forward_cuda(const at::Tensor& points, at::Tensor& coors,
const std::vector<float> voxel_size,
const std::vector<float> coors_range,
......@@ -1429,6 +1445,12 @@ int hard_voxelize_forward_impl(const at::Tensor& points, at::Tensor& voxels,
const int max_points, const int max_voxels,
const int NDim);
int nondeterministic_hard_voxelize_forward_impl(
const at::Tensor& points, at::Tensor& voxels, at::Tensor& coors,
at::Tensor& num_points_per_voxel, const std::vector<float> voxel_size,
const std::vector<float> coors_range, const int max_points,
const int max_voxels, const int NDim);
void dynamic_voxelize_forward_impl(const at::Tensor& points, at::Tensor& coors,
const std::vector<float> voxel_size,
const std::vector<float> coors_range,
......@@ -1436,6 +1458,8 @@ void dynamic_voxelize_forward_impl(const at::Tensor& points, at::Tensor& coors,
REGISTER_DEVICE_IMPL(hard_voxelize_forward_impl, CUDA,
hard_voxelize_forward_cuda);
// Register the CUDA backend for the nondeterministic hard-voxelize dispatch
// point, mirroring the registration of hard_voxelize_forward_impl above.
REGISTER_DEVICE_IMPL(nondeterministic_hard_voxelize_forward_impl, CUDA,
nondeterministic_hard_voxelize_forward_cuda);
REGISTER_DEVICE_IMPL(dynamic_voxelize_forward_impl, CUDA,
dynamic_voxelize_forward_cuda);
......
......@@ -14,6 +14,17 @@ int hard_voxelize_forward_impl(const at::Tensor &points, at::Tensor &voxels,
max_points, max_voxels, NDim);
}
// Device-agnostic dispatch point for nondeterministic hard voxelization.
// DISPATCH_DEVICE_IMPL routes the call to the implementation registered for
// the tensors' device (CUDA registers nondeterministic_hard_voxelize_forward_cuda
// via REGISTER_DEVICE_IMPL). NDim defaults to 3 (x, y, z).
// Returns the dispatched implementation's int result — presumably the number
// of voxels produced, matching hard_voxelize_forward_impl's contract.
int nondeterministic_hard_voxelize_forward_impl(
const at::Tensor &points, at::Tensor &voxels, at::Tensor &coors,
at::Tensor &num_points_per_voxel, const std::vector<float> voxel_size,
const std::vector<float> coors_range, const int max_points,
const int max_voxels, const int NDim = 3) {
return DISPATCH_DEVICE_IMPL(nondeterministic_hard_voxelize_forward_impl,
points, voxels, coors, num_points_per_voxel,
voxel_size, coors_range, max_points, max_voxels,
NDim);
}
void dynamic_voxelize_forward_impl(const at::Tensor &points, at::Tensor &coors,
const std::vector<float> voxel_size,
const std::vector<float> coors_range,
......@@ -27,7 +38,8 @@ void hard_voxelize_forward(const at::Tensor &points,
const at::Tensor &coors_range, at::Tensor &voxels,
at::Tensor &coors, at::Tensor &num_points_per_voxel,
at::Tensor &voxel_num, const int max_points,
const int max_voxels, const int NDim = 3) {
const int max_voxels, const int NDim = 3,
const bool deterministic = true) {
int64_t *voxel_num_data = voxel_num.data_ptr<int64_t>();
std::vector<float> voxel_size_v(
voxel_size.data_ptr<float>(),
......@@ -36,9 +48,15 @@ void hard_voxelize_forward(const at::Tensor &points,
coors_range.data_ptr<float>(),
coors_range.data_ptr<float>() + coors_range.numel());
if (deterministic) {
*voxel_num_data = hard_voxelize_forward_impl(
points, voxels, coors, num_points_per_voxel, voxel_size_v, coors_range_v,
max_points, max_voxels, NDim);
points, voxels, coors, num_points_per_voxel, voxel_size_v,
coors_range_v, max_points, max_voxels, NDim);
} else {
*voxel_num_data = nondeterministic_hard_voxelize_forward_impl(
points, voxels, coors, num_points_per_voxel, voxel_size_v,
coors_range_v, max_points, max_voxels, NDim);
}
}
void dynamic_voxelize_forward(const at::Tensor &points,
......
......@@ -12,10 +12,12 @@ void hard_voxelize_forward_cuda_parrots(CudaContext& ctx, const SSElement& attr,
const OperatorBase::in_list_t& ins,
OperatorBase::out_list_t& outs) {
int max_points, max_voxels, NDim;
bool deterministic;
SSAttrs(attr)
.get<int>("max_points", max_points)
.get<int>("max_voxels", max_voxels)
.get<int>("NDim", NDim)
.get<bool>("deterministic", deterministic)
.done();
const auto& points = buildATensor(ctx, ins[0]);
const auto& voxel_size = buildATensor(ctx, ins[1]);
......@@ -28,7 +30,7 @@ void hard_voxelize_forward_cuda_parrots(CudaContext& ctx, const SSElement& attr,
hard_voxelize_forward(points, voxel_size, coors_range, voxels, coors,
num_points_per_voxel, voxel_num, max_points, max_voxels,
NDim);
NDim, deterministic);
}
void dynamic_voxelize_forward_cuda_parrots(CudaContext& ctx,
......@@ -51,10 +53,12 @@ void hard_voxelize_forward_cpu_parrots(HostContext& ctx, const SSElement& attr,
const OperatorBase::in_list_t& ins,
OperatorBase::out_list_t& outs) {
int max_points, max_voxels, NDim;
bool deterministic;
SSAttrs(attr)
.get<int>("max_points", max_points)
.get<int>("max_voxels", max_voxels)
.get<int>("NDim", NDim)
.get<bool>("deterministic", deterministic)
.done();
const auto& points = buildATensor(ctx, ins[0]);
const auto& voxel_size = buildATensor(ctx, ins[1]);
......@@ -67,7 +71,7 @@ void hard_voxelize_forward_cpu_parrots(HostContext& ctx, const SSElement& attr,
hard_voxelize_forward(points, voxel_size, coors_range, voxels, coors,
num_points_per_voxel, voxel_num, max_points, max_voxels,
NDim);
NDim, deterministic);
}
void dynamic_voxelize_forward_cpu_parrots(HostContext& ctx,
......@@ -89,6 +93,7 @@ PARROTS_EXTENSION_REGISTER(hard_voxelize_forward)
.attr("max_points")
.attr("max_voxels")
.attr("NDim")
.attr("deterministic")
.input(3)
.output(4)
.apply(hard_voxelize_forward_cpu_parrots)
......
......@@ -9,7 +9,8 @@ void hard_voxelize_forward(const at::Tensor &points,
const at::Tensor &coors_range, at::Tensor &voxels,
at::Tensor &coors, at::Tensor &num_points_per_voxel,
at::Tensor &voxel_num, const int max_points,
const int max_voxels, const int NDim = 3);
const int max_voxels, const int NDim = 3,
const bool deterministic = true);
void dynamic_voxelize_forward(const at::Tensor &points,
const at::Tensor &voxel_size,
......
......@@ -161,16 +161,8 @@ def nms(boxes, scores, iou_threshold, offset=0, score_threshold=0, max_num=-1):
assert boxes.size(0) == scores.size(0)
assert offset in (0, 1)
if torch.__version__ == 'parrots':
indata_list = [boxes, scores]
indata_dict = {
'iou_threshold': float(iou_threshold),
'offset': int(offset)
}
inds = ext_module.nms(*indata_list, **indata_dict)
else:
inds = NMSop.apply(boxes, scores, iou_threshold, offset,
score_threshold, max_num)
inds = NMSop.apply(boxes, scores, iou_threshold, offset, score_threshold,
max_num)
dets = torch.cat((boxes[inds], scores[inds].reshape(-1, 1)), dim=1)
if is_numpy:
dets = dets.cpu().numpy()
......
......@@ -16,6 +16,8 @@ else:
import re
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
x = torch.randn(1, 3, 10, 10)
......
......@@ -14,6 +14,9 @@ from mmcv.cnn import (Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit,
initialize, kaiming_init, normal_init, trunc_normal_init,
uniform_init, xavier_init)
if torch.__version__ == 'parrots':
pytest.skip('not supported in parrots now', allow_module_level=True)
def test_constant_init():
conv_module = nn.Conv2d(3, 16, 3)
......
......@@ -9,11 +9,15 @@ from unittest.mock import MagicMock, patch
import cv2
import numpy as np
import pytest
import torch
from numpy.testing import assert_allclose, assert_array_equal
import mmcv
from mmcv.fileio.file_client import HTTPBackend, PetrelBackend
if torch.__version__ == 'parrots':
pytest.skip('not necessary in parrots test', allow_module_level=True)
class TestIO:
......
......@@ -13,6 +13,8 @@ import torch.nn.functional as F
from packaging import version
onnx_file = 'tmp.onnx'
if torch.__version__ == 'parrots':
pytest.skip('not supported in parrots now', allow_module_level=True)
@pytest.fixture(autouse=True)
......@@ -123,8 +125,6 @@ def test_bilinear_grid_sample(align_corners):
def test_nms():
if torch.__version__ == 'parrots':
pytest.skip('onnx is not supported in parrots directly')
from mmcv.ops import get_onnxruntime_op_path, nms
np_boxes = np.array([[6.0, 3.0, 8.0, 7.0], [3.0, 6.0, 9.0, 11.0],
[3.0, 7.0, 10.0, 12.0], [1.0, 4.0, 13.0, 7.0]],
......@@ -171,8 +171,6 @@ def test_nms():
@pytest.mark.skipif(not torch.cuda.is_available(), reason='test requires GPU')
def test_softnms():
if torch.__version__ == 'parrots':
pytest.skip('onnx is not supported in parrots directly')
from mmcv.ops import get_onnxruntime_op_path, soft_nms
# only support pytorch >= 1.7.0
......@@ -247,8 +245,6 @@ def test_softnms():
def test_roialign():
if torch.__version__ == 'parrots':
pytest.skip('onnx is not supported in parrots directly')
try:
from mmcv.ops import get_onnxruntime_op_path, roi_align
except (ImportError, ModuleNotFoundError):
......@@ -319,8 +315,6 @@ def test_roialign():
def test_roialign_rotated():
if torch.__version__ == 'parrots':
pytest.skip('onnx is not supported in parrots directly')
try:
from mmcv.ops import get_onnxruntime_op_path, roi_align_rotated
except (ImportError, ModuleNotFoundError):
......@@ -398,8 +392,6 @@ def test_roialign_rotated():
@pytest.mark.skipif(not torch.cuda.is_available(), reason='test requires GPU')
def test_roipool():
if torch.__version__ == 'parrots':
pytest.skip('onnx is not supported in parrots directly')
from mmcv.ops import roi_pool
# roi pool config
......@@ -602,8 +594,6 @@ def test_rotated_feature_align():
@pytest.mark.parametrize('mode', ['top', 'bottom', 'left', 'right'])
def test_corner_pool(mode, opset=11):
if torch.__version__ == 'parrots':
pytest.skip('onnx is not supported in parrots directly')
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
......@@ -648,8 +638,6 @@ def test_corner_pool(mode, opset=11):
@pytest.mark.parametrize('key', ['cummax', 'cummin'])
def test_cummax_cummin(key, opset=11):
if torch.__version__ == 'parrots':
pytest.skip('onnx is not supported in parrots directly')
# Note generally `cummax` or `cummin` is exportable to ONNX
# as long as the pytorch version >= 1.5.0, since `torch.cummax`
......@@ -758,9 +746,6 @@ def test_roll(shifts_dims_pair):
torch.testing.assert_allclose(ort_output, pytorch_output)
@pytest.mark.skipif(
torch.__version__ == 'parrots',
reason='onnx is not supported in parrots directly')
@pytest.mark.skipif(
not torch.cuda.is_available(),
reason='modulated_deform_conv2d only supports in GPU')
......@@ -852,9 +837,6 @@ def test_modulated_deform_conv2d():
assert np.allclose(pytorch_output, onnx_output, atol=1e-3)
@pytest.mark.skipif(
torch.__version__ == 'parrots',
reason='onnx is not supported in parrots directly')
def test_deform_conv2d(threshold=1e-3):
try:
from mmcv.ops import DeformConv2d, get_onnxruntime_op_path
......
......@@ -2,10 +2,16 @@
import numpy as np
import pytest
import torch
from torch.autograd import gradcheck
from mmcv.ops import RiRoIAlignRotated
if torch.__version__ == 'parrots':
from parrots.autograd import gradcheck
_USING_PARROTS = True
else:
from torch.autograd import gradcheck
_USING_PARROTS = False
np_feature = np.array([[[[1, 2], [3, 4]], [[1, 2], [4, 3]], [[4, 3], [2, 1]],
[[1, 2], [5, 6]], [[3, 4], [7, 8]], [[9, 10], [13,
14]],
......@@ -55,6 +61,10 @@ def test_roialign_rotated_gradcheck():
rois = torch.tensor(np_rois, dtype=torch.float, device='cuda')
froipool = RiRoIAlignRotated((pool_h, pool_w), spatial_scale, num_samples,
num_orientations, clockwise)
if _USING_PARROTS:
gradcheck(
froipool, (x, rois), no_grads=[rois], delta=1e-3, pt_atol=1e-3)
else:
gradcheck(froipool, (x, rois), eps=1e-3, atol=1e-3)
......
......@@ -5,6 +5,9 @@ from torch.autograd import gradcheck
from mmcv.ops import DynamicScatter
if torch.__version__ == 'parrots':
pytest.skip('not supported in parrots now', allow_module_level=True)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
......
......@@ -7,6 +7,9 @@ from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.ops import (SparseConvTensor, SparseInverseConv3d, SparseSequential,
SubMConv3d)
if torch.__version__ == 'parrots':
pytest.skip('not supported in parrots now', allow_module_level=True)
def make_sparse_convmodule(in_channels,
out_channels,
......
......@@ -3,11 +3,15 @@ import os
from functools import wraps
import onnx
import pytest
import torch
from mmcv.ops import nms
from mmcv.tensorrt.preprocess import preprocess_onnx
if torch.__version__ == 'parrots':
pytest.skip('not supported in parrots now', allow_module_level=True)
def remove_tmp_file(func):
......
......@@ -17,6 +17,8 @@ def mock(*args, **kwargs):
pass
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
......@@ -122,6 +124,8 @@ def test_scatter():
scatter(5, [-1])
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_Scatter():
# if the device is CPU, just return the input
target_gpus = [-1]
......
......@@ -44,6 +44,8 @@ sys.modules['petrel_client'] = MagicMock()
sys.modules['petrel_client.client'] = MagicMock()
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_optimizerhook():
class Model(nn.Module):
......@@ -929,6 +931,8 @@ def test_flat_cosine_runner_hook(multi_optimizers, by_epoch):
hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
@pytest.mark.parametrize('multi_optimizers, max_iters', [(True, 10), (True, 2),
(False, 10),
(False, 2)])
......
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.utils import model_zoo
from mmcv.utils import TORCH_VERSION, digit_version, load_url
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not necessary in parrots test')
def test_load_url():
url1 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.5.pth'
url2 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.6.pth'
......
......@@ -5,6 +5,7 @@ import torch
import mmcv
from mmcv.utils import TORCH_VERSION
pytest.skip('this test not ready now', allow_module_level=True)
skip_no_parrots = pytest.mark.skipif(
TORCH_VERSION != 'parrots', reason='test case under parrots environment')
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment