Unverified Commit b6a7fd98 authored by Zaida Zhou's avatar Zaida Zhou Committed by GitHub
Browse files

Upgrade pre commit hooks (#2321)

* Upgrade the versions of pre-commit hooks

* update the versions of zh-cn.yaml
parent a5db5f66
exclude: ^tests/data/
repos:
- repo: https://gitee.com/openmmlab/mirrors-flake8
rev: 3.8.3
rev: 5.0.4
hooks:
- id: flake8
- repo: https://gitee.com/openmmlab/mirrors-isort
......@@ -9,11 +9,11 @@ repos:
hooks:
- id: isort
- repo: https://gitee.com/openmmlab/mirrors-yapf
rev: v0.30.0
rev: v0.32.0
hooks:
- id: yapf
- repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
rev: v3.1.0
rev: v4.3.0
hooks:
- id: trailing-whitespace
- id: check-yaml
......@@ -26,7 +26,7 @@ repos:
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://gitee.com/openmmlab/mirrors-codespell
rev: v2.1.0
rev: v2.2.1
hooks:
- id: codespell
- repo: https://gitee.com/openmmlab/mirrors-mdformat
......@@ -44,7 +44,7 @@ repos:
- id: docformatter
args: ["--in-place", "--wrap-descriptions", "79"]
- repo: https://github.com/asottile/pyupgrade
rev: v2.32.1
rev: v3.0.0
hooks:
- id: pyupgrade
args: ["--py36-plus"]
......
exclude: ^tests/data/
repos:
- repo: https://github.com/PyCQA/flake8
rev: 3.8.3
rev: 5.0.4
hooks:
- id: flake8
- repo: https://github.com/PyCQA/isort
......@@ -9,11 +9,11 @@ repos:
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-yapf
rev: v0.30.0
rev: v0.32.0
hooks:
- id: yapf
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.1.0
rev: v4.3.0
hooks:
- id: trailing-whitespace
- id: check-yaml
......@@ -26,7 +26,7 @@ repos:
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://github.com/codespell-project/codespell
rev: v2.1.0
rev: v2.2.1
hooks:
- id: codespell
- repo: https://github.com/executablebooks/mdformat
......@@ -44,7 +44,7 @@ repos:
- id: docformatter
args: ["--in-place", "--wrap-descriptions", "79"]
- repo: https://github.com/asottile/pyupgrade
rev: v2.32.1
rev: v3.0.0
hooks:
- id: pyupgrade
args: ["--py36-plus"]
......
......@@ -513,7 +513,6 @@ def batch_counter_hook(module: nn.Module, input: tuple, output: Any) -> None:
def add_batch_counter_variables_or_reset(module: nn.Module) -> None:
module.__batch_counter__ = 0
......
......@@ -213,7 +213,8 @@ class ConstantInit(BaseInit):
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
r"""Initialize module parameters with values according to the method
described in `Understanding the difficulty of training deep feedforward
described in `Understanding the difficulty of training deep feedforward.
neural networks - Glorot, X. & Bengio, Y. (2010).
<http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
......@@ -271,7 +272,6 @@ class NormalInit(BaseInit):
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self, mean: float = 0, std: float = 1, **kwargs):
......@@ -317,7 +317,6 @@ class TruncNormalInit(BaseInit):
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self,
......@@ -401,7 +400,8 @@ class UniformInit(BaseInit):
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
r"""Initialize module parameters with the values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
described in `Delving deep into rectifiers: Surpassing human-level.
performance on ImageNet classification - He, K. et al. (2015).
<https://www.cv-foundation.org/openaccess/content_iccv_2015/
papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_
......@@ -688,12 +688,12 @@ def trunc_normal_(tensor: Tensor,
std: float = 1.,
a: float = -2.,
b: float = 2.) -> Tensor:
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
r"""Fills the input Tensor with values drawn from a truncated normal
distribution. The values are effectively drawn from the normal distribution
:math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside
:math:`[a, b]` redrawn until they are within the bounds. The method used
for generating the random values works best when :math:`a \leq \text{mean}
\leq b`.
Modified from
https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
......
......@@ -276,7 +276,6 @@ def ycbcr2bgr(img: np.ndarray) -> np.ndarray:
def convert_color_factory(src: str, dst: str) -> Callable:
code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
def convert_color(img: np.ndarray) -> np.ndarray:
......
......@@ -6,7 +6,6 @@ import torch
def is_custom_op_loaded() -> bool:
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
......
......@@ -47,12 +47,12 @@ class ActiveRotatedFilterFunction(Function):
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]:
"""
Args:
grad_output (torch.Tensor): The gradiant of output features
grad_output (torch.Tensor): The gradient of output features
with shape [num_output_planes * num_rotations,
num_input_planes * num_orientations, H, W].
Returns:
torch.Tensor: The gradiant of input features with shape
torch.Tensor: The gradient of input features with shape
[num_output_planes, num_input_planes, num_orientations, H, W].
"""
input, indices = ctx.saved_tensors
......
......@@ -116,7 +116,7 @@ class CorrelationFunction(Function):
class Correlation(nn.Module):
r"""Correlation operator
r"""Correlation operator.
This correlation operator works for optical flow correlation computation.
......
......@@ -161,7 +161,7 @@ void PSAMaskForwardMLUKernelLauncher(const int psa_type, const Tensor x,
TORCH_CHECK(h_feature * w_feature == y_c,
"channel of y should be the same as h_feature * w_feature");
TORCH_CHECK(psa_type == 0 || psa_type == 1,
"psa_type only suppurts 'COLLECT' and 'DISTRIBUTE' currently");
"psa_type only supports 'COLLECT' and 'DISTRIBUTE' currently");
if (x.numel() == 0) {
CNLOG(INFO) << "skip zero-element tensor";
......@@ -227,7 +227,7 @@ void PSAMaskBackwardMLUKernelLauncher(const int psa_type, const Tensor dy,
TORCH_CHECK(h_mask * w_mask == dx_c,
"channel of dx should be the same as h_mask * w_mask");
TORCH_CHECK(psa_type == 0 || psa_type == 1,
"psa_type only suppurts 'COLLECT' and 'DISTRIBUTE' currently");
"psa_type only supports 'COLLECT' and 'DISTRIBUTE' currently");
if (dx.numel() == 0) {
CNLOG(INFO) << "skip zero-element tensor";
......
......@@ -45,7 +45,7 @@ void ROIAlignForwardMLUKernelLauncher(Tensor input, Tensor rois, Tensor output,
input.dim(), "D");
TORCH_CHECK(rois.dim() == 2, "rois should be a 2d tensor, got ", rois.dim(),
"D");
TORCH_CHECK(pool_mode == 1, "pool_mode only suppurts 'avg' currently");
TORCH_CHECK(pool_mode == 1, "pool_mode only supports 'avg' currently");
auto memory_format =
torch_mlu::cnnl::ops::get_channels_last_memory_format(input.dim());
......@@ -121,7 +121,7 @@ void ROIAlignBackwardMLUKernelLauncher(Tensor grad, Tensor rois,
"D");
TORCH_CHECK(rois.dim() == 2, "rois should be a 2d tensor, got ", rois.dim(),
"D");
TORCH_CHECK(pool_mode == 1, "pool_mode only suppurts 'avg' currently");
TORCH_CHECK(pool_mode == 1, "pool_mode only supports 'avg' currently");
int batch_size = grad_input.size(0);
int channels = grad_input.size(1);
......
......@@ -66,11 +66,11 @@ class RotatedFeatureAlignFunction(Function):
def backward(ctx: Any, grad_output: torch.Tensor) -> tuple:
"""
Args:
grad_output (torch.Tensor): The gradiant of output features
grad_output (torch.Tensor): The gradient of output features
with shape [N,C,H,W].
Returns:
torch.Tensor: The gradiant of input features with shape [N,C,H,W].
torch.Tensor: The gradient of input features with shape [N,C,H,W].
"""
best_rbboxes = ctx.saved_tensors[0]
points = ctx.points
......
......@@ -53,10 +53,9 @@ class SparseModule(nn.Module):
class SparseSequential(SparseModule):
r"""A sequential container.
Modules will be added to it in the order they are passed in the
constructor.
Alternatively, an ordered dict of modules can also be passed in.
r"""A sequential container. Modules will be added to it in the order they
are passed in the constructor. Alternatively, an ordered dict of modules
can also be passed in.
To make it easier to understand, given is a small example::
......
......@@ -42,7 +42,7 @@ class Cache:
class VideoReader:
"""Video class with similar usage to a list object.
This video warpper class provides convenient apis to access frames.
This video wrapper class provides convenient apis to access frames.
There exists an issue of OpenCV's VideoCapture class that jumping to a
certain frame may be inaccurate. It is fixed in this class by checking
the position after jumping each time.
......
......@@ -23,4 +23,4 @@ default_section = THIRDPARTY
# than "BA"
[codespell]
quiet-level = 3
ignore-words-list = inout,hist,ba
ignore-words-list = inout,hist,ba,inh,ro,tne,warmup,warpped,warpping
......@@ -208,7 +208,6 @@ def relu_forward(self, x):
@patch('torch.nn.BatchNorm2d.forward', bn_forward)
@patch('torch.nn.Conv2d.forward', conv_forward)
def test_order():
with pytest.raises(AssertionError):
# order must be a tuple
order = ['conv', 'norm', 'act']
......
......@@ -5,7 +5,6 @@ from mmcv.cnn.bricks import GeneralizedAttention
def test_context_block():
# test attention_type='1000'
imgs = torch.randn(2, 16, 20, 20)
gen_attention_block = GeneralizedAttention(16, attention_type='1000')
......
......@@ -14,7 +14,6 @@ from mmcv.runner import ModuleList
def test_adaptive_padding():
for padding in ('same', 'corner'):
kernel_size = 16
stride = 16
......@@ -320,7 +319,6 @@ def test_patch_embed():
def test_patch_merging():
# Test the model with int padding
in_c = 3
out_c = 4
......
......@@ -333,7 +333,6 @@ def test_linear(in_w, in_h, in_feature, out_feature):
@patch('mmcv.cnn.bricks.wrappers.TORCH_VERSION', (1, 10))
def test_nn_op_forward_called():
for m in ['Conv2d', 'ConvTranspose2d', 'MaxPool2d']:
with patch(f'torch.nn.{m}.forward') as nn_module_forward:
# randn input
......
......@@ -58,7 +58,6 @@ class ToyModel(nn.Module):
@skip_no_ipu
def test_ipu_hook_wrapper(tmp_path):
model = ToyModel()
dummy_input = {
'data': {
......
......@@ -172,7 +172,6 @@ def run_model(ipu_options,
@skip_no_ipu
def test_run_model():
# test feature alignment not support gradientAccumulation mode
options_cfg = dict(
randomSeed=888,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment