Unverified Commit f22c9eb4 authored by Zaida Zhou, committed by GitHub

Add code spell hook to pre-commit and fix typos (#1384)

* Add code-spell hook to pre-commit

* Add code-spell hook to pre-commit and fix typos

* fix setup.cfg
parent c934605e
@@ -15,7 +15,7 @@ try:
     # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported
     # and used; otherwise, auto fp16 will adopt mmcv's implementation.
     # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16
-    # manually, so the behavior may not be consistant with real amp.
+    # manually, so the behavior may not be consistent with real amp.
     from torch.cuda.amp import autocast
 except ImportError:
     pass
......
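For context, mmcv exposes this version-dependent behaviour through the `auto_fp16` decorator. Below is a minimal sketch of how a module might opt in, assuming fp16 has been enabled on the model (e.g. via `wrap_fp16_model`); the module and layer are illustrative, not taken from this commit:

```python
import torch.nn as nn
from mmcv.runner import auto_fp16


class ExampleHead(nn.Module):
    """Illustrative module whose forward runs in fp16 when enabled."""

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 4)

    @auto_fp16(apply_to=('x', ))
    def forward(self, x):
        # On PyTorch >= 1.6.0 this runs under torch.cuda.amp.autocast;
        # otherwise mmcv casts the listed tensor arguments to fp16 itself,
        # which is why the behavior may not be consistent with real amp.
        return self.fc(x)
```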
@@ -28,7 +28,7 @@ class MlflowLoggerHook(LoggerHook):
         tags (dict of str: str, optional): Tags for the current run.
             Default None.
             If not None, set tags for the current run.
-        log_model (bool, optional): Wheter to log an MLflow artifact.
+        log_model (bool, optional): Whether to log an MLflow artifact.
             Default True.
             If True, log runner.model as an MLflow artifact
             for the current run.
......
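For reference, the hook whose docstring is fixed above is usually enabled from a runner config; a hedged sketch, with placeholder experiment name, tags and interval:

```python
# Hedged sketch: the experiment name, tags and interval are placeholders,
# not values from this commit.
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='MlflowLoggerHook',
            exp_name='demo_experiment',  # assumed experiment name
            tags=dict(optimizer='SGD'),  # optional tags for the run
            log_model=True),  # log runner.model as an MLflow artifact
    ])
```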
@@ -53,7 +53,7 @@ class DefaultOptimizerConstructor:
             offset layer in deformable convs, set ``dcn_offset_lr_mult``
             to the original ``dcn_offset_lr_mult`` * ``bias_lr_mult``.
         2. If the option ``dcn_offset_lr_mult`` is used, the constructor will
-            apply it to all the DCN layers in the model. So be carefull when
+            apply it to all the DCN layers in the model. So be careful when
             the model contains multiple DCN layers in places other than
             backbone.
......
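As a hedged illustration of the option described above, `dcn_offset_lr_mult` is passed through `paramwise_cfg` when building the optimizer; `model` is assumed to be an existing `nn.Module` containing deformable convs, and the numeric values are placeholders:

```python
from mmcv.runner import build_optimizer

# Note the caveat above: dcn_offset_lr_mult applies to *all* DCN layers in
# the model, not only those in the backbone.
optimizer_cfg = dict(
    type='SGD',
    lr=0.01,
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(bias_lr_mult=2., dcn_offset_lr_mult=0.1))
optimizer = build_optimizer(model, optimizer_cfg)
```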
@@ -257,7 +257,7 @@ class Config:
                                f'Duplicate keys: {duplicate_keys}')
             base_cfg_dict.update(c)
-        # Subtitute base variables from strings to their actual values
+        # Substitute base variables from strings to their actual values
         cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict,
                                                 base_cfg_dict)
......
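The `_substitute_base_vars` call shown above backs the `{{ _base_.xxx }}` reference syntax in configs; a hedged sketch of what it enables (file and variable names are illustrative, and the `{{ ... }}` text is replaced before the config is parsed):

```python
# base.py (illustrative)
num_classes = 80

# child.py (illustrative): the {{ _base_.xxx }} placeholder below is plain
# text in the config file and is substituted with the actual value from the
# base config when the file is loaded.
_base_ = './base.py'
head = dict(num_classes={{ _base_.num_classes }})
```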
@@ -47,7 +47,7 @@ def scandir(dir_path, suffix=None, recursive=False):
             directory. Default: False.
     Returns:
-        A generator for all the interested files with relative pathes.
+        A generator for all the interested files with relative paths.
     """
     if isinstance(dir_path, (str, Path)):
         dir_path = str(dir_path)
......
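A short usage sketch of the `scandir` helper documented above; the directory and suffix are placeholders:

```python
import mmcv

# Yields paths relative to 'configs/' (placeholder directory), optionally
# descending into subdirectories.
for rel_path in mmcv.scandir('configs/', suffix='.py', recursive=True):
    print(rel_path)
```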
@@ -68,7 +68,7 @@ class Registry:
     Please refer to
     https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for
-    advanced useage.
+    advanced usage.
     Args:
         name (str): Registry name.
......
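A minimal sketch of the registry usage the docstring points to; the registry and class names are illustrative:

```python
from mmcv.utils import Registry, build_from_cfg

CONVERTERS = Registry('converter')


@CONVERTERS.register_module()
class Converter1:

    def __init__(self, a, b):
        self.a = a
        self.b = b


# Build an instance from a config dict whose 'type' names the registered class.
converter = build_from_cfg(dict(type='Converter1', a=1, b=2), CONVERTERS)
```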
@@ -33,7 +33,7 @@ def _any(judge_result):
             if _any(element):
                 return True
     except TypeError:
-        # Maybe encouter the case: torch.tensor(True) | torch.tensor(False)
+        # Maybe encounter the case: torch.tensor(True) | torch.tensor(False)
         if judge_result:
             return True
     return False
......
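The `except TypeError` branch fixed above exists because zero-dimensional tensors are not iterable; a small illustration of the case named in the comment:

```python
import torch

t = torch.tensor(True) | torch.tensor(False)  # a 0-dim bool tensor
# Iterating a 0-dim tensor raises TypeError, so _any falls back to
# evaluating the value's truthiness directly.
try:
    iter(t)
except TypeError:
    assert bool(t) is True
```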
@@ -17,3 +17,6 @@ known_first_party = mmcv
 known_third_party = addict,cv2,m2r,numpy,onnx,onnxruntime,packaging,pytest,pytorch_sphinx_theme,recommonmark,scipy,sphinx,tensorrt,torch,torchvision,yaml,yapf
 no_lines_before = STDLIB,LOCALFOLDER
 default_section = THIRDPARTY
+
+[codespell]
+ignore-words-list = inout,hist
@@ -88,7 +88,7 @@ class TestGeometric:
         assert resized_img.shape == (
             912, 810, 3) and h_scale == 912 / 300 and w_scale == 810 / 400
-        # one of size and scale_factor shuld be given
+        # one of size and scale_factor should be given
         with pytest.raises(ValueError):
             mmcv.imresize_to_multiple(
                 self.img, divisor=16, size=(1000, 600), scale_factor=2)
......
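For context, the test above checks that `size` and `scale_factor` are mutually exclusive; a hedged usage sketch with a synthetic image:

```python
import numpy as np

import mmcv

img = np.zeros((300, 400, 3), dtype=np.uint8)  # placeholder image (h, w, c)

# Valid: give only scale_factor; the output height and width are additionally
# rounded up to multiples of `divisor`.
resized, w_scale, h_scale = mmcv.imresize_to_multiple(
    img, divisor=16, scale_factor=3, return_scale=True)

# Invalid: passing both size and scale_factor raises ValueError, as asserted
# in the test above.
```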
@@ -15,7 +15,7 @@ boxes_arr = [[[0, 0, 2, 1], [1, 0, 3, 1], [1, 0, 2, 1], [0, 0, 3, 1],
               [0, 1, 1, 2], [0, 0, 3, 2], [1, 0, 3, 2], [2, 0, 3, 2]]]
 output_dict = {
     # [1,c,h*w,4] for each value,
-    # the ouput is manually checked for its correctness
+    # the output is manually checked for its correctness
     # pool_size=1
     1: [[[[3., 6., 1., 2.], [4., 7., -1., 1.], [3., 7., 1., 2.],
......
@@ -80,7 +80,7 @@ def test_roialign():
         opset_version=11)
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     opt_shape_dict = {
         'input': [list(input.shape),
                   list(input.shape),
@@ -141,7 +141,7 @@ def test_nms():
         opset_version=11)
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     opt_shape_dict = {
         'boxes': [list(boxes.shape),
                   list(boxes.shape),
@@ -220,7 +220,7 @@ def test_batched_nms():
         output_names=output_names,
         opset_version=11)
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     opt_shape_dict = {
         'boxes': [list(boxes.shape),
                   list(boxes.shape),
@@ -295,7 +295,7 @@ def test_scatternd():
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     opt_shape_dict = {
         'input': [list(data.shape),
                   list(data.shape),
@@ -372,7 +372,7 @@ def test_deform_conv():
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     opt_shape_dict = {
         'input': [list(x.shape), list(x.shape),
                   list(x.shape)],
@@ -443,7 +443,7 @@ def test_modulated_deform_conv(with_bias):
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     opt_shape_dict = {
         'input': [list(x.shape), list(x.shape),
                   list(x.shape)],
@@ -515,7 +515,7 @@ def test_grid_sample(mode, padding_mode, align_corners):
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     opt_shape_dict = {
         'input': [list(input.shape),
                   list(input.shape),
@@ -602,7 +602,7 @@ def test_cummin_cummax(func: Callable):
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     opt_shape_dict = {
         'input':
         [list(input.shape),
@@ -688,7 +688,7 @@ def test_instance_norm(dynamic_export, fp16_mode):
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     if dynamic_export:
         opt_shape_dict = {
             'input':
@@ -777,7 +777,7 @@ def test_corner_pool(mode):
         opset_version=opset)
     onnx_model = onnx.load(onnx_file)
-    # create trt engine and wraper
+    # create trt engine and wrapper
     opt_shape_dict = {
         'input': [list(input.shape),
                   list(input.shape),
......
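All of the hunks above fix the same comment in tests that build a TensorRT engine and wrap it; a hedged sketch of that pattern using the mmcv.tensorrt helpers (file names, shapes and the output binding name are placeholders, and a CUDA device with TensorRT installed is assumed):

```python
import onnx
import torch

from mmcv.tensorrt import TRTWrapper, onnx2trt, save_trt_engine

onnx_model = onnx.load('tmp.onnx')  # placeholder ONNX file
x = torch.rand(1, 3, 224, 224).cuda()  # placeholder input tensor

# min / optimal / max shapes for the dynamic 'input' binding, mirroring the
# opt_shape_dict structure used in the tests above.
opt_shape_dict = {'input': [list(x.shape), list(x.shape), list(x.shape)]}

# create trt engine and wrapper
trt_engine = onnx2trt(onnx_model, opt_shape_dict, max_workspace_size=1 << 30)
save_trt_engine(trt_engine, 'tmp.engine')
trt_model = TRTWrapper('tmp.engine', ['input'], ['output'])

with torch.no_grad():
    trt_outputs = trt_model({'input': x})
```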
@@ -29,7 +29,7 @@ def test_is_module_wrapper():
             return self.conv(x)
     # _verify_model_across_ranks is added in torch1.9.0 so we should check
-    # wether _verify_model_across_ranks is the member of torch.distributed
+    # whether _verify_model_across_ranks is the member of torch.distributed
     # before mocking
     if hasattr(torch.distributed, '_verify_model_across_ranks'):
         torch.distributed._verify_model_across_ranks = mock
......
@@ -503,7 +503,7 @@ def test_sequential_model_weight_init():
                        torch.full(seq_model[1].conv2d.weight.shape, 2.))
     assert torch.equal(seq_model[1].conv2d.bias,
                        torch.full(seq_model[1].conv2d.bias.shape, 3.))
-    # inner init_cfg has highter priority
+    # inner init_cfg has higher priority
     layers = [build_from_cfg(cfg, COMPONENTS) for cfg in seq_model_cfg]
     seq_model = Sequential(
         *layers,
@@ -540,7 +540,7 @@ def test_modulelist_weight_init():
                        torch.full(modellist[1].conv2d.weight.shape, 2.))
     assert torch.equal(modellist[1].conv2d.bias,
                        torch.full(modellist[1].conv2d.bias.shape, 3.))
-    # inner init_cfg has highter priority
+    # inner init_cfg has higher priority
     layers = [build_from_cfg(cfg, COMPONENTS) for cfg in models_cfg]
     modellist = ModuleList(
         layers,
......
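The two hunks above fix the same comment about init_cfg priority; a hedged sketch of the rule they exercise, with an illustrative module rather than the components from the test file:

```python
import torch
import torch.nn as nn

from mmcv.runner import BaseModule, Sequential


class Block(BaseModule):
    """Illustrative component with a single conv layer."""

    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        self.conv2d = nn.Conv2d(3, 3, 3)


# The inner init_cfg (constant weights/biases) takes priority over the outer
# Kaiming init passed to the Sequential wrapper.
inner = Block(init_cfg=dict(type='Constant', layer='Conv2d', val=2., bias=3.))
seq_model = Sequential(inner, init_cfg=dict(type='Kaiming', layer='Conv2d'))
seq_model.init_weights()
assert torch.equal(seq_model[0].conv2d.weight,
                   torch.full(seq_model[0].conv2d.weight.shape, 2.))
```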
@@ -236,7 +236,7 @@ def test_eval_hook():
         assert osp.exists(ckpt_path)
         assert runner.meta['hook_msgs']['best_score'] == -3
-    # Test the EvalHook when resume happend
+    # Test the EvalHook when resume happened
     data_loader = DataLoader(EvalDataset())
     eval_hook = EvalHook(data_loader, save_best='acc')
     with tempfile.TemporaryDirectory() as tmpdir:
......
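For context, the hunk above sits in a test of best-checkpoint tracking; a hedged sketch of how EvalHook is attached to a runner (`val_dataset` and `runner` are assumed to exist already, and 'acc' is a placeholder metric name):

```python
from torch.utils.data import DataLoader

from mmcv.runner import EvalHook

# Track the best 'acc' score and save the best checkpoint while training.
eval_hook = EvalHook(DataLoader(val_dataset), interval=1, save_best='acc')
runner.register_hook(eval_hook)

# After training (including resumed runs), the best score is kept in
# runner.meta['hook_msgs']['best_score'], as asserted in the test above.
```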