Unverified Commit 45fa3e44 authored by Zaida Zhou, committed by GitHub

Add pyupgrade pre-commit hook (#1937)

* add pyupgrade

* add options for pyupgrade

* minor refinement
parent c561264d
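The hook itself is registered in .pre-commit-config.yaml, which is not part of the diff excerpt below, so the exact revision and options this commit pins are not visible here. As a rough sketch only, a typical pyupgrade entry looks like the following; the rev and args values are assumptions, not necessarily what this commit uses:

  - repo: https://github.com/asottile/pyupgrade
    rev: v2.32.1  # assumed release tag; pin whichever version the project chose
    hooks:
      - id: pyupgrade
        args: ['--py36-plus']  # assumed option; keeps rewrites within the supported Python floor

Once installed with pre-commit install, the hook rewrites legacy Python 2 era idioms on staged files at commit time; the test changes below are consistent with running it once over the existing suite, e.g. class Foo(object) -> class Foo:, super(Foo, self).__init__() -> super().__init__(), and set([...]) -> set comprehensions.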
@@ -4,7 +4,7 @@ import pytest
 import torch


-class TestBoxIoURotated(object):
+class TestBoxIoURotated:

     def test_box_iou_rotated_cpu(self):
         from mmcv.ops import box_iou_rotated
...
@@ -3,7 +3,7 @@ import torch
 from torch.autograd import gradcheck


-class TestCarafe(object):
+class TestCarafe:

     def test_carafe_naive_gradcheck(self):
         if not torch.cuda.is_available():
...
@@ -15,7 +15,7 @@ class Loss(nn.Module):
         return torch.mean(input - target)


-class TestCrissCrossAttention(object):
+class TestCrissCrossAttention:

     def test_cc_attention(self):
         device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
...
@@ -35,7 +35,7 @@ gt_offset_bias_grad = [1.44, -0.72, 0., 0., -0.10, -0.08, -0.54, -0.54],
 gt_deform_weight_grad = [[[[3.62, 0.], [0.40, 0.18]]]]


-class TestDeformconv(object):
+class TestDeformconv:

     def _test_deformconv(self,
                          dtype=torch.float,
...
@@ -35,7 +35,7 @@ outputs = [([[[[1, 1.25], [1.5, 1.75]]]], [[[[3.0625, 0.4375],
                          0.00390625]]]])]


-class TestDeformRoIPool(object):
+class TestDeformRoIPool:

     def test_deform_roi_pool_gradcheck(self):
         if not torch.cuda.is_available():
...
@@ -37,7 +37,7 @@ sigmoid_outputs = [(0.13562961, [[-0.00657264, 0.11185755],
                     [-0.02462499, 0.08277918, 0.18050370]])]


-class Testfocalloss(object):
+class Testfocalloss:

     def _test_softmax(self, dtype=torch.float):
         if not torch.cuda.is_available():
...
@@ -10,7 +10,7 @@ except ImportError:
     _USING_PARROTS = False


-class TestFusedBiasLeakyReLU(object):
+class TestFusedBiasLeakyReLU:

     @classmethod
     def setup_class(cls):
...
@@ -2,7 +2,7 @@
 import torch


-class TestInfo(object):
+class TestInfo:

     def test_info(self):
         if not torch.cuda.is_available():
...
@@ -2,7 +2,7 @@
 import torch


-class TestMaskedConv2d(object):
+class TestMaskedConv2d:

     def test_masked_conv2d(self):
         if not torch.cuda.is_available():
...
@@ -37,7 +37,7 @@ dcn_offset_b_grad = [
 ]


-class TestMdconv(object):
+class TestMdconv:

     def _test_mdconv(self, dtype=torch.float, device='cuda'):
         if not torch.cuda.is_available() and device == 'cuda':
...
@@ -55,7 +55,7 @@ def test_forward_multi_scale_deformable_attn_pytorch():
     N, M, D = 1, 2, 2
     Lq, L, P = 2, 2, 2
     shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long)
-    S = sum([(H * W).item() for H, W in shapes])
+    S = sum((H * W).item() for H, W in shapes)
     torch.manual_seed(3)
     value = torch.rand(N, S, M, D) * 0.01
@@ -78,7 +78,7 @@ def test_forward_equal_with_pytorch_double():
     shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
     level_start_index = torch.cat((shapes.new_zeros(
         (1, )), shapes.prod(1).cumsum(0)[:-1]))
-    S = sum([(H * W).item() for H, W in shapes])
+    S = sum((H * W).item() for H, W in shapes)
     torch.manual_seed(3)
     value = torch.rand(N, S, M, D).cuda() * 0.01
@@ -111,7 +111,7 @@ def test_forward_equal_with_pytorch_float():
     shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
     level_start_index = torch.cat((shapes.new_zeros(
         (1, )), shapes.prod(1).cumsum(0)[:-1]))
-    S = sum([(H * W).item() for H, W in shapes])
+    S = sum((H * W).item() for H, W in shapes)
     torch.manual_seed(3)
     value = torch.rand(N, S, M, D).cuda() * 0.01
@@ -155,7 +155,7 @@ def test_gradient_numerical(channels,
     shapes = torch.as_tensor([(3, 2), (2, 1)], dtype=torch.long).cuda()
     level_start_index = torch.cat((shapes.new_zeros(
         (1, )), shapes.prod(1).cumsum(0)[:-1]))
-    S = sum([(H * W).item() for H, W in shapes])
+    S = sum((H * W).item() for H, W in shapes)
     value = torch.rand(N, S, M, channels).cuda() * 0.01
     sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
...
@@ -6,7 +6,7 @@ import torch
 from mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE


-class Testnms(object):
+class Testnms:

     @pytest.mark.parametrize('device', [
         pytest.param(
@@ -129,8 +129,7 @@ class Testnms(object):
         scores = tensor_dets[:, 4]
         nms_keep_inds = nms(boxes.contiguous(), scores.contiguous(),
                             iou_thr)[1]
-        assert set([g[0].item()
-                    for g in np_groups]) == set(nms_keep_inds.tolist())
+        assert {g[0].item() for g in np_groups} == set(nms_keep_inds.tolist())

         # non empty tensor input
         tensor_dets = torch.from_numpy(np_dets)
...
@@ -33,7 +33,7 @@ def run_before_and_after_test():
 class WrapFunction(nn.Module):

     def __init__(self, wrapped_function):
-        super(WrapFunction, self).__init__()
+        super().__init__()
         self.wrapped_function = wrapped_function

     def forward(self, *args, **kwargs):
@@ -662,7 +662,7 @@ def test_cummax_cummin(key, opset=11):
     input_list = [
         # arbitrary shape, e.g. 1-D, 2-D, 3-D, ...
         torch.rand((2, 3, 4, 1, 5)),
-        torch.rand((1)),
+        torch.rand(1),
         torch.rand((2, 0, 1)),  # tensor.numel() is 0
         torch.FloatTensor(),  # empty tensor
     ]
...
@@ -15,7 +15,7 @@ class Loss(nn.Module):
         return torch.mean(input - target)


-class TestPSAMask(object):
+class TestPSAMask:

     def test_psa_mask_collect(self):
         if not torch.cuda.is_available():
...
@@ -29,7 +29,7 @@ outputs = [([[[[1., 2.], [3., 4.]]]], [[[[1., 1.], [1., 1.]]]]),
             1.]]]])]


-class TestRoiPool(object):
+class TestRoiPool:

     def test_roipool_gradcheck(self):
         if not torch.cuda.is_available():
...
@@ -14,7 +14,7 @@ else:
     import re


-class TestSyncBN(object):
+class TestSyncBN:

     def dist_init(self):
         rank = int(os.environ['SLURM_PROCID'])
...
@@ -30,7 +30,7 @@ if not is_tensorrt_plugin_loaded():
 class WrapFunction(nn.Module):

     def __init__(self, wrapped_function):
-        super(WrapFunction, self).__init__()
+        super().__init__()
         self.wrapped_function = wrapped_function

     def forward(self, *args, **kwargs):
@@ -576,7 +576,7 @@ def test_cummin_cummax(func: Callable):
     input_list = [
         # arbitrary shape, e.g. 1-D, 2-D, 3-D, ...
         torch.rand((2, 3, 4, 1, 5)).cuda(),
-        torch.rand((1)).cuda()
+        torch.rand(1).cuda()
     ]
     input_names = ['input']
@@ -756,7 +756,7 @@ def test_corner_pool(mode):
     class CornerPoolWrapper(CornerPool):

         def __init__(self, mode):
-            super(CornerPoolWrapper, self).__init__(mode)
+            super().__init__(mode)

         def forward(self, x):
             # no use `torch.cummax`, instead `corner_pool` is used
...
@@ -10,7 +10,7 @@ except ImportError:
     _USING_PARROTS = False


-class TestUpFirDn2d(object):
+class TestUpFirDn2d:
     """Unit test for UpFirDn2d.

     Here, we just test the basic case of upsample version. More gerneal tests
...
@@ -96,8 +96,8 @@ def test_voxelization_nondeterministic():
     coors_all = dynamic_voxelization.forward(points)
     coors_all = coors_all.cpu().detach().numpy().tolist()
-    coors_set = set([tuple(c) for c in coors])
-    coors_all_set = set([tuple(c) for c in coors_all])
+    coors_set = {tuple(c) for c in coors}
+    coors_all_set = {tuple(c) for c in coors_all}
     assert len(coors_set) == len(coors)
     assert len(coors_set - coors_all_set) == 0
@@ -112,7 +112,7 @@ def test_voxelization_nondeterministic():
     for c, ps, n in zip(coors, voxels, num_points_per_voxel):
         ideal_voxel_points_set = coors_points_dict[tuple(c)]
-        voxel_points_set = set([tuple(p) for p in ps[:n]])
+        voxel_points_set = {tuple(p) for p in ps[:n]}
         assert len(voxel_points_set) == n
         if n < max_num_points:
             assert voxel_points_set == ideal_voxel_points_set
@@ -133,7 +133,7 @@ def test_voxelization_nondeterministic():
     voxels, coors, num_points_per_voxel = hard_voxelization.forward(points)
     coors = coors.cpu().detach().numpy().tolist()
-    coors_set = set([tuple(c) for c in coors])
-    coors_all_set = set([tuple(c) for c in coors_all])
+    coors_set = {tuple(c) for c in coors}
+    coors_all_set = {tuple(c) for c in coors_all}
     assert len(coors_set) == len(coors) == len(coors_all_set)
@@ -63,7 +63,7 @@ def test_is_module_wrapper():
     # test module wrapper registry
     @MODULE_WRAPPERS.register_module()
-    class ModuleWrapper(object):
+    class ModuleWrapper:

        def __init__(self, module):
            self.module = module
...