Unverified commit b6a7fd98 authored by Zaida Zhou, committed by GitHub

Upgrade pre-commit hooks (#2321)

* Upgrade the versions of pre-commit hooks

* Update the versions in zh-cn.yaml
parent a5db5f66
 exclude: ^tests/data/
 repos:
   - repo: https://gitee.com/openmmlab/mirrors-flake8
-    rev: 3.8.3
+    rev: 5.0.4
     hooks:
       - id: flake8
   - repo: https://gitee.com/openmmlab/mirrors-isort
@@ -9,11 +9,11 @@ repos:
     hooks:
       - id: isort
   - repo: https://gitee.com/openmmlab/mirrors-yapf
-    rev: v0.30.0
+    rev: v0.32.0
     hooks:
       - id: yapf
   - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
-    rev: v3.1.0
+    rev: v4.3.0
     hooks:
       - id: trailing-whitespace
       - id: check-yaml
@@ -26,7 +26,7 @@ repos:
       - id: mixed-line-ending
         args: ["--fix=lf"]
   - repo: https://gitee.com/openmmlab/mirrors-codespell
-    rev: v2.1.0
+    rev: v2.2.1
     hooks:
       - id: codespell
   - repo: https://gitee.com/openmmlab/mirrors-mdformat
@@ -44,7 +44,7 @@ repos:
       - id: docformatter
         args: ["--in-place", "--wrap-descriptions", "79"]
   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.32.1
+    rev: v3.0.0
     hooks:
       - id: pyupgrade
         args: ["--py36-plus"]
...
 exclude: ^tests/data/
 repos:
   - repo: https://github.com/PyCQA/flake8
-    rev: 3.8.3
+    rev: 5.0.4
     hooks:
       - id: flake8
   - repo: https://github.com/PyCQA/isort
@@ -9,11 +9,11 @@ repos:
     hooks:
       - id: isort
   - repo: https://github.com/pre-commit/mirrors-yapf
-    rev: v0.30.0
+    rev: v0.32.0
     hooks:
       - id: yapf
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v3.1.0
+    rev: v4.3.0
     hooks:
       - id: trailing-whitespace
       - id: check-yaml
@@ -26,7 +26,7 @@ repos:
       - id: mixed-line-ending
         args: ["--fix=lf"]
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.1.0
+    rev: v2.2.1
     hooks:
       - id: codespell
   - repo: https://github.com/executablebooks/mdformat
@@ -44,7 +44,7 @@ repos:
       - id: docformatter
         args: ["--in-place", "--wrap-descriptions", "79"]
   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.32.1
+    rev: v3.0.0
     hooks:
       - id: pyupgrade
         args: ["--py36-plus"]
...
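Note: the pyupgrade bump keeps the `--py36-plus` flag, which rewrites pre-3.6 idioms in place. A minimal illustration of the kind of rewrite it applies (a hypothetical snippet, not taken from this diff):

```python
# Before running `pyupgrade --py36-plus`:
class Config(object):
    def describe(self):
        return 'name={}'.format(self.name)

# After: the redundant `object` base is dropped and
# str.format is rewritten as an f-string.
class Config:
    def describe(self):
        return f'name={self.name}'
```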
@@ -513,7 +513,6 @@ def batch_counter_hook(module: nn.Module, input: tuple, output: Any) -> None:
 def add_batch_counter_variables_or_reset(module: nn.Module) -> None:
-
     module.__batch_counter__ = 0
...
@@ -213,7 +213,8 @@ class ConstantInit(BaseInit):

 @INITIALIZERS.register_module(name='Xavier')
 class XavierInit(BaseInit):
     r"""Initialize module parameters with values according to the method
-    described in `Understanding the difficulty of training deep feedforward
+    described in `Understanding the difficulty of training deep feedforward.
+
     neural networks - Glorot, X. & Bengio, Y. (2010).
     <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
@@ -271,7 +272,6 @@ class NormalInit(BaseInit):
             Defaults to None.
         layer (str | list[str], optional): the layer will be initialized.
             Defaults to None.
     """
-
     def __init__(self, mean: float = 0, std: float = 1, **kwargs):
@@ -317,7 +317,6 @@ class TruncNormalInit(BaseInit):
             Defaults to None.
         layer (str | list[str], optional): the layer will be initialized.
             Defaults to None.
     """
-
     def __init__(self,
@@ -401,7 +400,8 @@ class UniformInit(BaseInit):
 @INITIALIZERS.register_module(name='Kaiming')
 class KaimingInit(BaseInit):
     r"""Initialize module parameters with the values according to the method
-    described in `Delving deep into rectifiers: Surpassing human-level
+    described in `Delving deep into rectifiers: Surpassing human-level.
+
     performance on ImageNet classification - He, K. et al. (2015).
     <https://www.cv-foundation.org/openaccess/content_iccv_2015/
     papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_
@@ -688,12 +688,12 @@ def trunc_normal_(tensor: Tensor,
                   std: float = 1.,
                   a: float = -2.,
                   b: float = 2.) -> Tensor:
-    r"""Fills the input Tensor with values drawn from a truncated
-    normal distribution. The values are effectively drawn from the
-    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
-    with values outside :math:`[a, b]` redrawn until they are within
-    the bounds. The method used for generating the random values works
-    best when :math:`a \leq \text{mean} \leq b`.
+    r"""Fills the input Tensor with values drawn from a truncated normal
+    distribution. The values are effectively drawn from the normal distribution
+    :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside
+    :math:`[a, b]` redrawn until they are within the bounds. The method used
+    for generating the random values works best when :math:`a \leq \text{mean}
+    \leq b`.

     Modified from
     https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
...
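The reflowed `trunc_normal_` docstring describes draw-and-redraw sampling. A minimal runnable sketch of that behaviour (illustrative only; the real implementation, adapted from torch.nn.init, uses the inverse CDF for efficiency):

```python
import torch
from torch import Tensor

def trunc_normal_sketch(tensor: Tensor, mean=0., std=1., a=-2., b=2.) -> Tensor:
    # Draw from N(mean, std^2), then redraw out-of-range values
    # until everything lies in [a, b].
    tensor.normal_(mean, std)
    out_of_range = (tensor < a) | (tensor > b)
    while out_of_range.any():
        tensor[out_of_range] = torch.empty(
            int(out_of_range.sum())).normal_(mean, std)
        out_of_range = (tensor < a) | (tensor > b)
    return tensor

w = trunc_normal_sketch(torch.empty(3, 5), std=0.02)
```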
@@ -276,7 +276,6 @@ def ycbcr2bgr(img: np.ndarray) -> np.ndarray:
 def convert_color_factory(src: str, dst: str) -> Callable:
     code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
-
     def convert_color(img: np.ndarray) -> np.ndarray:
...
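For context, `convert_color_factory` builds a converter closure around a single OpenCV conversion code. A self-contained sketch consistent with the lines shown (the `cvtColor` body is assumed):

```python
from typing import Callable

import cv2
import numpy as np

def convert_color_factory(src: str, dst: str) -> Callable:
    # Resolve the OpenCV conversion code once; the returned
    # closure reuses it for every call.
    code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')

    def convert_color(img: np.ndarray) -> np.ndarray:
        return cv2.cvtColor(img, code)

    return convert_color

bgr2rgb = convert_color_factory('bgr', 'rgb')  # uses cv2.COLOR_BGR2RGB
```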
@@ -6,7 +6,6 @@ import torch

 def is_custom_op_loaded() -> bool:
-
     # Following strings of text style are from colorama package
     bright_style, reset_style = '\x1b[1m', '\x1b[0m'
     red_text, blue_text = '\x1b[31m', '\x1b[34m'
...
@@ -47,12 +47,12 @@ class ActiveRotatedFilterFunction(Function):
     def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]:
         """
         Args:
-            grad_output (torch.Tensor): The gradiant of output features
+            grad_output (torch.Tensor): The gradient of output features
                 with shape [num_output_planes * num_rotations,
                 num_input_planes * num_orientations, H, W].

         Returns:
-            torch.Tensor: The gradiant of input features with shape
+            torch.Tensor: The gradient of input features with shape
                 [num_output_planes, num_input_planes, num_orientations, H, W].
         """
         input, indices = ctx.saved_tensors
...
@@ -116,7 +116,7 @@ class CorrelationFunction(Function):

 class Correlation(nn.Module):
-    r"""Correlation operator
+    r"""Correlation operator.

     This correlation operator works for optical flow correlation computation.
...
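The docstring fix above belongs to the `Correlation` module used for optical-flow cost volumes. A usage sketch, assuming mmcv was built with its CUDA ops and the `max_displacement` keyword:

```python
import torch
from mmcv.ops import Correlation  # requires mmcv built with ops

corr = Correlation(max_displacement=3)
feat1 = torch.randn(1, 16, 32, 32).cuda()
feat2 = torch.randn(1, 16, 32, 32).cuda()
# Correlation volume over a (2 * max_displacement + 1)^2 neighbourhood,
# the building block of optical-flow cost volumes.
out = corr(feat1, feat2)
```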
@@ -161,7 +161,7 @@ void PSAMaskForwardMLUKernelLauncher(const int psa_type, const Tensor x,
   TORCH_CHECK(h_feature * w_feature == y_c,
               "channel of y should be the same as h_feature * w_feature");
   TORCH_CHECK(psa_type == 0 || psa_type == 1,
-              "psa_type only suppurts 'COLLECT' and 'DISTRIBUTE' currently");
+              "psa_type only supports 'COLLECT' and 'DISTRIBUTE' currently");

   if (x.numel() == 0) {
     CNLOG(INFO) << "skip zero-element tensor";
@@ -227,7 +227,7 @@ void PSAMaskBackwardMLUKernelLauncher(const int psa_type, const Tensor dy,
   TORCH_CHECK(h_mask * w_mask == dx_c,
               "channel of dx should be the same as h_mask * w_mask");
   TORCH_CHECK(psa_type == 0 || psa_type == 1,
-              "psa_type only suppurts 'COLLECT' and 'DISTRIBUTE' currently");
+              "psa_type only supports 'COLLECT' and 'DISTRIBUTE' currently");

   if (dx.numel() == 0) {
     CNLOG(INFO) << "skip zero-element tensor";
...
@@ -45,7 +45,7 @@ void ROIAlignForwardMLUKernelLauncher(Tensor input, Tensor rois, Tensor output,
               input.dim(), "D");
   TORCH_CHECK(rois.dim() == 2, "rois should be a 2d tensor, got ", rois.dim(),
               "D");
-  TORCH_CHECK(pool_mode == 1, "pool_mode only suppurts 'avg' currently");
+  TORCH_CHECK(pool_mode == 1, "pool_mode only supports 'avg' currently");

   auto memory_format =
       torch_mlu::cnnl::ops::get_channels_last_memory_format(input.dim());
@@ -121,7 +121,7 @@ void ROIAlignBackwardMLUKernelLauncher(Tensor grad, Tensor rois,
               "D");
   TORCH_CHECK(rois.dim() == 2, "rois should be a 2d tensor, got ", rois.dim(),
               "D");
-  TORCH_CHECK(pool_mode == 1, "pool_mode only suppurts 'avg' currently");
+  TORCH_CHECK(pool_mode == 1, "pool_mode only supports 'avg' currently");

   int batch_size = grad_input.size(0);
   int channels = grad_input.size(1);
...
@@ -66,11 +66,11 @@ class RotatedFeatureAlignFunction(Function):
     def backward(ctx: Any, grad_output: torch.Tensor) -> tuple:
         """
         Args:
-            grad_output (torch.Tensor): The gradiant of output features
+            grad_output (torch.Tensor): The gradient of output features
                 with shape [N,C,H,W].

         Returns:
-            torch.Tensor: The gradiant of input features with shape [N,C,H,W].
+            torch.Tensor: The gradient of input features with shape [N,C,H,W].
         """
         best_rbboxes = ctx.saved_tensors[0]
         points = ctx.points
...
@@ -53,10 +53,9 @@ class SparseModule(nn.Module):

 class SparseSequential(SparseModule):
-    r"""A sequential container.
-    Modules will be added to it in the order they are passed in the
-    constructor.
-    Alternatively, an ordered dict of modules can also be passed in.
+    r"""A sequential container. Modules will be added to it in the order they
+    are passed in the constructor. Alternatively, an ordered dict of modules
+    can also be passed in.

     To make it easier to understand, given is a small example::
...
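As the reflowed docstring states, `SparseSequential` behaves like `torch.nn.Sequential`. A small sketch, assuming the `mmcv.ops` import path and using plain activation layers for illustration:

```python
from collections import OrderedDict

import torch.nn as nn
from mmcv.ops import SparseSequential  # assumed import path

# Modules run in the order passed in; an OrderedDict also works
# and gives each submodule a name.
seq = SparseSequential(OrderedDict([
    ('act1', nn.ReLU()),
    ('act2', nn.ReLU()),
]))
```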
@@ -42,7 +42,7 @@ class Cache:
 class VideoReader:
     """Video class with similar usage to a list object.

-    This video warpper class provides convenient apis to access frames.
+    This video wrapper class provides convenient apis to access frames.
     There exists an issue of OpenCV's VideoCapture class that jumping to a
     certain frame may be inaccurate. It is fixed in this class by checking
     the position after jumping each time.
...
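`VideoReader` exposes list-like access to frames, as the docstring promises. A usage sketch ('test.mp4' is a placeholder path):

```python
import mmcv

video = mmcv.VideoReader('test.mp4')  # placeholder path
print(len(video))     # number of frames
frame = video[10]     # random access; position is verified after seeking
for frame in video:   # sequential iteration
    pass
```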
@@ -23,4 +23,4 @@ default_section = THIRDPARTY
 # than "BA"
 [codespell]
 quiet-level = 3
-ignore-words-list = inout,hist,ba
+ignore-words-list = inout,hist,ba,inh,ro,tne,warmup,warpped,warpping
@@ -208,7 +208,6 @@ def relu_forward(self, x):
 @patch('torch.nn.BatchNorm2d.forward', bn_forward)
 @patch('torch.nn.Conv2d.forward', conv_forward)
 def test_order():
-
     with pytest.raises(AssertionError):
         # order must be a tuple
         order = ['conv', 'norm', 'act']
...
@@ -5,7 +5,6 @@ from mmcv.cnn.bricks import GeneralizedAttention

 def test_context_block():
-
     # test attention_type='1000'
     imgs = torch.randn(2, 16, 20, 20)
     gen_attention_block = GeneralizedAttention(16, attention_type='1000')
...
@@ -14,7 +14,6 @@ from mmcv.runner import ModuleList

 def test_adaptive_padding():
-
     for padding in ('same', 'corner'):
         kernel_size = 16
         stride = 16
@@ -320,7 +319,6 @@ def test_patch_embed():

 def test_patch_merging():
-
     # Test the model with int padding
     in_c = 3
     out_c = 4
...
@@ -333,7 +333,6 @@ def test_linear(in_w, in_h, in_feature, out_feature):
 @patch('mmcv.cnn.bricks.wrappers.TORCH_VERSION', (1, 10))
 def test_nn_op_forward_called():
-
     for m in ['Conv2d', 'ConvTranspose2d', 'MaxPool2d']:
         with patch(f'torch.nn.{m}.forward') as nn_module_forward:
             # randn input
...
@@ -58,7 +58,6 @@ class ToyModel(nn.Module):

 @skip_no_ipu
 def test_ipu_hook_wrapper(tmp_path):
-
     model = ToyModel()
     dummy_input = {
         'data': {
...
@@ -172,7 +172,6 @@ def run_model(ipu_options,

 @skip_no_ipu
 def test_run_model():
-
     # test feature alignment not support gradientAccumulation mode
     options_cfg = dict(
         randomSeed=888,