Unverified Commit f22c9eb4 authored by Zaida Zhou, committed by GitHub

Add code spell hook to pre-commit and fix typos (#1384)

* Add code-spell hook to pre-commit

* Add code-spell hook to pre-commit and fix typos

* fix setup.cfg
parent c934605e
......@@ -15,7 +15,7 @@ try:
# If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported
# and used; otherwise, auto fp16 will adopt mmcv's implementation.
# Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16
- # manually, so the behavior may not be consistant with real amp.
+ # manually, so the behavior may not be consistent with real amp.
from torch.cuda.amp import autocast
except ImportError:
pass
......
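The hunk above is from mmcv's fp16 utilities, which prefer torch's native automatic mixed precision and otherwise fall back to mmcv's own manual casting. A minimal sketch of the same guarded-import pattern; the helper name and flag below are illustrative, not mmcv internals:

    import torch

    try:
        # Available from PyTorch 1.6.0 onwards.
        from torch.cuda.amp import autocast
        HAS_NATIVE_AMP = True
    except ImportError:
        HAS_NATIVE_AMP = False

    def forward_fp16(model, inputs):
        """Run a forward pass under native amp when it exists (illustrative helper)."""
        if HAS_NATIVE_AMP and torch.cuda.is_available():
            with autocast():
                return model(inputs)
        # Fallback path: plain forward; mmcv casts tensor types manually instead.
        return model(inputs)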
......@@ -28,7 +28,7 @@ class MlflowLoggerHook(LoggerHook):
tags (dict of str: str, optional): Tags for the current run.
Default None.
If not None, set tags for the current run.
- log_model (bool, optional): Wheter to log an MLflow artifact.
+ log_model (bool, optional): Whether to log an MLflow artifact.
Default True.
If True, log runner.model as an MLflow artifact
for the current run.
......
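The docstring above documents MlflowLoggerHook's tags and log_model options. A hedged sketch of how the hook is usually declared in a runner's log_config; the experiment name and tag values are placeholders:

    # Hypothetical config fragment; the keys mirror the docstring above.
    log_config = dict(
        interval=50,
        hooks=[
            dict(
                type='MlflowLoggerHook',
                exp_name='demo_exp',          # experiment to log the run under
                tags=dict(stage='baseline'),  # optional str -> str tags for the run
                log_model=True),              # store runner.model as an MLflow artifact
        ])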
......@@ -53,7 +53,7 @@ class DefaultOptimizerConstructor:
offset layer in deformable convs, set ``dcn_offset_lr_mult``
to the original ``dcn_offset_lr_mult`` * ``bias_lr_mult``.
2. If the option ``dcn_offset_lr_mult`` is used, the constructor will
- apply it to all the DCN layers in the model. So be carefull when
+ apply it to all the DCN layers in the model. So be careful when
the model contains multiple DCN layers in places other than
backbone.
......
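The note above concerns how DefaultOptimizerConstructor applies paramwise_cfg multipliers. A hedged config sketch with made-up values, using the key names from the docstring:

    # Hypothetical optimizer config; dcn_offset_lr_mult is applied to every
    # DCN offset layer in the model, so use it with care (see the note above).
    optimizer = dict(
        type='SGD', lr=0.01, momentum=0.9, weight_decay=1e-4,
        paramwise_cfg=dict(
            bias_lr_mult=2.0,         # scale lr for all bias parameters
            dcn_offset_lr_mult=0.1))  # scale lr for deformable-conv offset layers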
......@@ -257,7 +257,7 @@ class Config:
f'Duplicate keys: {duplicate_keys}')
base_cfg_dict.update(c)
- # Subtitute base variables from strings to their actual values
+ # Substitute base variables from strings to their actual values
cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict,
base_cfg_dict)
......
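The hunk above is from Config's handling of _base_ files, where placeholder strings referring to base values are substituted after the base configs are merged. A hedged, self-contained illustration; the file names and values are invented for the example:

    from mmcv import Config

    # Write two tiny config files so the snippet runs on its own (made-up content).
    with open('base_demo.py', 'w') as f:
        f.write("model = dict(type='ResNet', depth=50)\n")
    with open('child_demo.py', 'w') as f:
        f.write("_base_ = './base_demo.py'\n"
                "head = dict(in_depth='{{_base_.model.depth}}')\n")

    cfg = Config.fromfile('child_demo.py')
    print(cfg.head.in_depth)  # expected: 50, pulled in by base-variable substitution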
......@@ -47,7 +47,7 @@ def scandir(dir_path, suffix=None, recursive=False):
directory. Default: False.
Returns:
- A generator for all the interested files with relative pathes.
+ A generator for all the interested files with relative paths.
"""
if isinstance(dir_path, (str, Path)):
dir_path = str(dir_path)
......
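scandir, as documented above, yields the matching files under a directory as relative paths. A small usage sketch; the suffix filter is just an example:

    from mmcv import scandir

    # Collect every Python file below the current directory, as paths relative to it.
    for path in scandir('.', suffix='.py', recursive=True):
        print(path)  # e.g. 'setup.py', 'mmcv/utils/path.py'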
......@@ -68,7 +68,7 @@ class Registry:
Please refer to
https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for
- advanced useage.
+ advanced usage.
Args:
name (str): Registry name.
......
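The Registry docstring above points to the online docs for advanced usage; the basic register-and-build flow looks roughly like this (registry and class names are illustrative):

    from mmcv.utils import Registry, build_from_cfg

    MODELS = Registry('model')

    @MODELS.register_module()
    class TinyNet:
        def __init__(self, depth=18):
            self.depth = depth

    # 'type' selects the registered class; the rest become constructor kwargs.
    net = build_from_cfg(dict(type='TinyNet', depth=34), MODELS)
    assert net.depth == 34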
......@@ -33,7 +33,7 @@ def _any(judge_result):
if _any(element):
return True
except TypeError:
- # Maybe encouter the case: torch.tensor(True) | torch.tensor(False)
+ # Maybe encounter the case: torch.tensor(True) | torch.tensor(False)
if judge_result:
return True
return False
......
......@@ -17,3 +17,6 @@ known_first_party = mmcv
known_third_party = addict,cv2,m2r,numpy,onnx,onnxruntime,packaging,pytest,pytorch_sphinx_theme,recommonmark,scipy,sphinx,tensorrt,torch,torchvision,yaml,yapf
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
+ [codespell]
+ ignore-words-list = inout,hist
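The new [codespell] section above whitelists identifiers such as `inout` and `hist` that codespell would otherwise flag. A hedged sketch of running the checker with the same ignore list from Python; it assumes the `codespell` CLI is installed (e.g. via `pip install codespell`):

    import subprocess

    # codespell also picks up the [codespell] section of setup.cfg automatically;
    # the explicit flag below just mirrors that configuration.
    result = subprocess.run(
        ['codespell', '--ignore-words-list', 'inout,hist', 'mmcv/'],
        capture_output=True, text=True)
    print(result.stdout or 'no spelling issues found')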
......@@ -88,7 +88,7 @@ class TestGeometric:
assert resized_img.shape == (
912, 810, 3) and h_scale == 912 / 300 and w_scale == 810 / 400
- # one of size and scale_factor shuld be given
+ # one of size and scale_factor should be given
with pytest.raises(ValueError):
mmcv.imresize_to_multiple(
self.img, divisor=16, size=(1000, 600), scale_factor=2)
......
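The test above verifies that passing both size and scale_factor raises ValueError; exactly one of them should be given. A hedged usage sketch with only scale_factor:

    import numpy as np
    import mmcv

    img = np.zeros((300, 400, 3), dtype=np.uint8)

    # Resize by a factor of 2, then round the result up to multiples of 16.
    resized = mmcv.imresize_to_multiple(img, divisor=16, scale_factor=2)
    assert resized.shape[0] % 16 == 0 and resized.shape[1] % 16 == 0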
......@@ -15,7 +15,7 @@ boxes_arr = [[[0, 0, 2, 1], [1, 0, 3, 1], [1, 0, 2, 1], [0, 0, 3, 1],
[0, 1, 1, 2], [0, 0, 3, 2], [1, 0, 3, 2], [2, 0, 3, 2]]]
output_dict = {
# [1,c,h*w,4] for each value,
- # the ouput is manually checked for its correctness
+ # the output is manually checked for its correctness
# pool_size=1
1: [[[[3., 6., 1., 2.], [4., 7., -1., 1.], [3., 7., 1., 2.],
......
......@@ -80,7 +80,7 @@ def test_roialign():
opset_version=11)
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
opt_shape_dict = {
'input': [list(input.shape),
list(input.shape),
......@@ -141,7 +141,7 @@ def test_nms():
opset_version=11)
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
opt_shape_dict = {
'boxes': [list(boxes.shape),
list(boxes.shape),
......@@ -220,7 +220,7 @@ def test_batched_nms():
output_names=output_names,
opset_version=11)
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
opt_shape_dict = {
'boxes': [list(boxes.shape),
list(boxes.shape),
......@@ -295,7 +295,7 @@ def test_scatternd():
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
opt_shape_dict = {
'input': [list(data.shape),
list(data.shape),
......@@ -372,7 +372,7 @@ def test_deform_conv():
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
opt_shape_dict = {
'input': [list(x.shape), list(x.shape),
list(x.shape)],
......@@ -443,7 +443,7 @@ def test_modulated_deform_conv(with_bias):
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
opt_shape_dict = {
'input': [list(x.shape), list(x.shape),
list(x.shape)],
......@@ -515,7 +515,7 @@ def test_grid_sample(mode, padding_mode, align_corners):
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
opt_shape_dict = {
'input': [list(input.shape),
list(input.shape),
......@@ -602,7 +602,7 @@ def test_cummin_cummax(func: Callable):
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
opt_shape_dict = {
'input':
[list(input.shape),
......@@ -688,7 +688,7 @@ def test_instance_norm(dynamic_export, fp16_mode):
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
if dynamic_export:
opt_shape_dict = {
'input':
......@@ -777,7 +777,7 @@ def test_corner_pool(mode):
opset_version=opset)
onnx_model = onnx.load(onnx_file)
- # create trt engine and wraper
+ # create trt engine and wrapper
opt_shape_dict = {
'input': [list(input.shape),
list(input.shape),
......
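All of the "create trt engine and wrapper" hunks above follow the same recipe: export the op to ONNX with fixed input/output names, then build a TensorRT engine over a min/opt/max shape profile. A heavily hedged sketch of just the export half using plain torch.onnx; the model, file name, and shapes are made up, and the mmcv-specific TensorRT wrapper is omitted:

    import torch

    class TinyModel(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.relu(x)

    model = TinyModel().eval()
    dummy = torch.randn(1, 3, 32, 32)

    # The tests above then hand the .onnx file plus an opt_shape_dict
    # (min/opt/max shape per named input) to the TensorRT builder.
    torch.onnx.export(
        model, dummy, 'tiny.onnx',
        input_names=['input'], output_names=['output'],
        opset_version=11)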
......@@ -29,7 +29,7 @@ def test_is_module_wrapper():
return self.conv(x)
# _verify_model_across_ranks is added in torch1.9.0 so we should check
- # wether _verify_model_across_ranks is the member of torch.distributed
+ # whether _verify_model_across_ranks is the member of torch.distributed
# before mocking
if hasattr(torch.distributed, '_verify_model_across_ranks'):
torch.distributed._verify_model_across_ranks = mock
......
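The comment above explains that `_verify_model_across_ranks` only exists from torch 1.9.0, so the test checks for it before mocking. The same guard pattern, sketched with unittest.mock:

    from unittest import mock

    import torch.distributed as dist

    # Only patch the attribute when this torch version actually provides it;
    # otherwise the mock would invent an attribute that never existed.
    if hasattr(dist, '_verify_model_across_ranks'):
        dist._verify_model_across_ranks = mock.MagicMock(return_value=True)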
......@@ -503,7 +503,7 @@ def test_sequential_model_weight_init():
torch.full(seq_model[1].conv2d.weight.shape, 2.))
assert torch.equal(seq_model[1].conv2d.bias,
torch.full(seq_model[1].conv2d.bias.shape, 3.))
- # inner init_cfg has highter priority
+ # inner init_cfg has higher priority
layers = [build_from_cfg(cfg, COMPONENTS) for cfg in seq_model_cfg]
seq_model = Sequential(
*layers,
......@@ -540,7 +540,7 @@ def test_modulelist_weight_init():
torch.full(modellist[1].conv2d.weight.shape, 2.))
assert torch.equal(modellist[1].conv2d.bias,
torch.full(modellist[1].conv2d.bias.shape, 3.))
- # inner init_cfg has highter priority
+ # inner init_cfg has higher priority
layers = [build_from_cfg(cfg, COMPONENTS) for cfg in models_cfg]
modellist = ModuleList(
layers,
......
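Both hunks above assert that an inner module's own init_cfg takes priority over the init_cfg given to the wrapping Sequential/ModuleList. A hedged sketch of that behaviour with a made-up module; the class name and constant values are illustrative:

    import torch.nn as nn
    from mmcv.runner import BaseModule, Sequential

    class FooConv(BaseModule):
        def __init__(self, init_cfg=None):
            super().__init__(init_cfg)
            self.conv2d = nn.Conv2d(3, 8, 3)

    inner = FooConv(init_cfg=dict(type='Constant', layer='Conv2d', val=2., bias=3.))
    seq = Sequential(inner, init_cfg=dict(type='Constant', layer='Conv2d', val=0., bias=0.))
    seq.init_weights()
    print(float(inner.conv2d.bias[0]))  # expected 3.0: the inner init_cfg wins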
......@@ -236,7 +236,7 @@ def test_eval_hook():
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == -3
- # Test the EvalHook when resume happend
+ # Test the EvalHook when resume happened
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(data_loader, save_best='acc')
with tempfile.TemporaryDirectory() as tmpdir:
......
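The hunk above exercises EvalHook resumption with save_best='acc'. A hedged sketch of how such a hook is typically wired up; the toy dataset, its metric, and the exact import location are assumptions:

    import torch
    from torch.utils.data import DataLoader, Dataset
    from mmcv.runner import EvalHook  # import path assumed for this mmcv version

    class ToyEvalDataset(Dataset):
        """Stand-in dataset; EvalHook expects an ``evaluate`` method on it."""
        def __len__(self):
            return 4
        def __getitem__(self, idx):
            return torch.tensor([float(idx)])
        def evaluate(self, results, logger=None):
            return dict(acc=0.5)  # fake metric keyed by the save_best name

    eval_hook = EvalHook(DataLoader(ToyEvalDataset()), save_best='acc')
    # eval_hook would then be attached to a runner, e.g. runner.register_hook(eval_hook)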