Unverified commit b6a7fd98 authored by Zaida Zhou, committed by GitHub

Upgrade pre-commit hooks (#2321)

* Upgrade the versions of pre-commit hooks

* Update the versions in zh-cn.yaml
parent a5db5f66
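For context, upgrading pre-commit hooks amounts to bumping the `rev` pin of each hook repository in `.pre-commit-config.yaml`, typically via `pre-commit autoupdate`. A minimal sketch of such a bump is shown below; the hook repositories and `rev` values are placeholders for illustration, not the exact versions changed by this commit.

```yaml
# Illustrative .pre-commit-config.yaml fragment. The repos and rev values
# below are placeholders, not the actual versions bumped in #2321.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.3.0          # upgrading a hook means moving this rev to a newer tag
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
  - repo: https://github.com/PyCQA/flake8
    rev: 5.0.4           # placeholder version
    hooks:
      - id: flake8
```

Running `pre-commit autoupdate` rewrites each `rev` to the latest tagged release of the corresponding hook repository; the single-line removals in the test files below appear to be cleanups applied by the refreshed hooks.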
@@ -173,7 +173,6 @@ def test_cast_to_options():
@skip_no_ipu
def test_model_sharding():
model = ToyModel()
split_edges = [dict(layer_to_call='666', ipu_id=0)]
......
@@ -13,7 +13,6 @@ except ImportError:
@pytest.mark.skipif(torch is None, reason='requires torch library')
def test_tensor2imgs():
# test tensor obj
with pytest.raises(AssertionError):
tensor = np.random.rand(2, 3, 3)
......
@@ -13,7 +13,6 @@ gt_input1_grad = [[[[1., 2., 3.], [3., 1., 2.], [8., 5., 2.]]]]
def assert_equal_tensor(tensor_a, tensor_b):
assert tensor_a.eq(tensor_b).all()
......
@@ -22,7 +22,6 @@ except ImportError:
not torch.cuda.is_available(), reason='requires CUDA support'))
])
def test_multiscale_deformable_attention(device_type):
with pytest.raises(ValueError):
# embed_dims must be divisible by num_heads,
MultiScaleDeformableAttention(
......
@@ -601,7 +601,6 @@ def test_rotated_feature_align():
@pytest.mark.parametrize('mode', ['top', 'bottom', 'left', 'right'])
def test_corner_pool(mode, opset=11):
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
if not os.path.exists(ort_custom_op_path):
@@ -646,7 +645,6 @@ def test_corner_pool(mode, opset=11):
@pytest.mark.parametrize('key', ['cummax', 'cummin'])
def test_cummax_cummin(key, opset=11):
# Note generally `cummax` or `cummin` is exportable to ONNX
# as long as the pytorch version >= 1.5.0, since `torch.cummax`
# is only supported with torch >= 1.5.0.
......
@@ -112,7 +112,6 @@ def test_points_in_boxes_cpu():
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_boxes_all():
boxes = torch.tensor(
[[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
......
@@ -6,7 +6,6 @@ from mmcv.ops import SAConv2d
def test_sacconv():
# test with normal case
x = torch.rand(1, 3, 256, 256)
saconv = SAConv2d(3, 5, kernel_size=3, padding=1)
......
@@ -653,7 +653,6 @@ def test_cummin_cummax(func: Callable):
@pytest.mark.parametrize('dynamic_export', [True, False])
@pytest.mark.parametrize('fp16_mode', [True, False])
def test_instance_norm(dynamic_export, fp16_mode):
n, c, h, w = 2, 3, 10, 10
data = torch.randn(n, c, h, w).cuda()
norm = nn.InstanceNorm2d(c, affine=True)
......
@@ -437,7 +437,6 @@ def test_without_layer_weight_init():
def test_override_weight_init():
# only initialize 'override'
model_cfg = dict(
type='FooModel',
......
@@ -148,7 +148,6 @@ def test_get_state_dict():
def test_load_pavimodel_dist():
sys.modules['pavi'] = MagicMock()
sys.modules['pavi.modelcloud'] = MagicMock()
pavimodel = Mockpavimodel()
......
@@ -69,7 +69,6 @@ class Model(nn.Module):
def _build_epoch_runner():
model = Model()
tmp_dir = tempfile.mkdtemp()
@@ -79,7 +78,6 @@ def _build_epoch_runner():
def _build_iter_runner():
model = Model()
tmp_dir = tempfile.mkdtemp()
......
@@ -66,7 +66,6 @@ def test_cast_tensor_type():
def test_auto_fp16():
with pytest.raises(TypeError):
# ExampleObject is not a subclass of nn.Module
@@ -192,7 +191,6 @@ def test_auto_fp16():
def test_force_fp32():
with pytest.raises(TypeError):
# ExampleObject is not a subclass of nn.Module
......
@@ -56,7 +56,6 @@ def test_build_runner():
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_epoch_based_runner(runner_class):
with pytest.warns(DeprecationWarning):
# batch_processor is deprecated
model = OldStyleModel()
......
@@ -366,7 +366,6 @@ def test_merge_delete():
def test_merge_intermediate_variable():
cfg_file = osp.join(data_path, 'config/i_child.py')
cfg = Config.fromfile(cfg_file)
# cfg.field
......
@@ -142,7 +142,6 @@ def test_flow2rgb():
def test_flow_warp():
img = np.zeros((5, 5, 3))
img[2, 2, 0] = 1
flow = np.ones((5, 5, 2))
......