Unverified Commit 13888df2 authored by lizz, committed by GitHub

Fix typos (#1041)



* Fix typos
Signed-off-by: lizz <lizz@sensetime.com>

* Add deprecation warning
Signed-off-by: lizz <lizz@sensetime.com>
parent 9d1436fb
@@ -370,9 +370,9 @@ Let us introduce the usage of `initialize` in detail.
`BaseModule` is inherited from `torch.nn.Module`, and the only difference between them is that `BaseModule` implements `init_weight`.
-`Sequential` is inhertied from `BaseModule` and `torch.nn.Sequential`.
+`Sequential` is inherited from `BaseModule` and `torch.nn.Sequential`.
-`ModuleList` is inhertied from `BaseModule` and `torch.nn.ModuleList`.
+`ModuleList` is inherited from `BaseModule` and `torch.nn.ModuleList`.
`````python
import torch.nn as nn
@@ -534,5 +534,5 @@ The following types are supported for `filename` argument of `mmcv.load_checkpoint`
- filepath: The filepath of the checkpoint.
- `http://xxx` and `https://xxx`: The link to download the checkpoint. The `SHA256` postfix should be contained in the filename.
-- `torchvison://xxx`: The model links in `torchvision.models`.Please refer to [torchvision](https://pytorch.org/docs/stable/torchvision/models.html) for details.
+- `torchvision://xxx`: The model links in `torchvision.models`.Please refer to [torchvision](https://pytorch.org/docs/stable/torchvision/models.html) for details.
- `open-mmlab://xxx`: The model links or filepath provided in default and additional json files.
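Editor's aside (not part of the commit): a minimal sketch of loading weights through the `torchvision://` handler listed above, assuming `mmcv.runner.load_checkpoint` and an installed torchvision; the model name is only an example.

```python
import torchvision
from mmcv.runner import load_checkpoint

# Build a torchvision model, then pull matching weights via the
# `torchvision://` handler described in the list above.
model = torchvision.models.resnet50()
load_checkpoint(model, 'torchvision://resnet50', map_location='cpu')
```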
@@ -88,7 +88,7 @@ Here is an example.
import torch
import onnx
-from mmcv.tensorrt import (TRTWraper, onnx2trt, save_trt_engine,
+from mmcv.tensorrt import (TRTWrapper, onnx2trt, save_trt_engine,
is_tensorrt_plugin_loaded)
assert is_tensorrt_plugin_loaded(), 'Requires to complie TensorRT plugins in mmcv'
@@ -117,7 +117,7 @@ trt_engine = onnx2trt(
save_trt_engine(trt_engine, trt_file)
# Run inference with TensorRT
-trt_model = TRTWraper(trt_file, ['input'], ['output'])
+trt_model = TRTWrapper(trt_file, ['input'], ['output'])
with torch.no_grad():
trt_outputs = trt_model({'input': inputs})
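Editor's note (not part of the commit): in this example `trt_outputs` is a dict keyed by the output names passed to `TRTWrapper`, so the result could be read back roughly as below; treat the exact dtype/device handling as an assumption.

```python
# Retrieve the tensor produced for the 'output' binding declared above.
output = trt_outputs['output']
print(output.shape, output.dtype)
```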
@@ -161,7 +161,7 @@ Below are the main steps:
### Reminders
-- Some of the [custom ops](https://mmcv.readthedocs.io/en/latest/ops.html) in `mmcv` have their cuda implementations, which could be refered.
+- Some of the [custom ops](https://mmcv.readthedocs.io/en/latest/ops.html) in `mmcv` have their cuda implementations, which could be referred.
## Known Issues
......
@@ -69,7 +69,7 @@ a = 1
b = dict(b1=[0, 1, 2], b2=None)
```
-#### Inherit from base config without overlaped keys
+#### Inherit from base config without overlapped keys
`config_b.py`
@@ -90,7 +90,7 @@ d = 'string'
New fields in `config_b.py` are combined with old fields in `config_a.py`
-#### Inherit from base config with overlaped keys
+#### Inherit from base config with overlapped keys
`config_c.py`
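Editor's aside (not part of the commit): for readers unfamiliar with the inheritance mechanism these headings refer to, a minimal sketch using mmcv's `_base_` field and `Config.fromfile`; the file names are hypothetical.

```python
from mmcv import Config

# config_a.py defines:  a = 1  and  b = dict(b1=[0, 1, 2], b2=None)
# config_b.py defines:  _base_ = './config_a.py'  plus its own new fields
cfg = Config.fromfile('config_b.py')
# Fields from both files are merged; when keys overlap, the child
# config overrides the base config.
print(cfg.a, cfg.b)
```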
@@ -203,7 +203,7 @@ for i, task in enumerate(mmcv.track_iter_progress(tasks)):
### Timer
-It is convinient to compute the runtime of a code block with `Timer`.
+It is convenient to compute the runtime of a code block with `Timer`.
```python
import time
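import mmcv

# Editor's sketch continuing the truncated example above (not part of the
# commit); assumes mmcv.Timer with since_start()/since_last_check().
timer = mmcv.Timer()             # starts timing on construction
time.sleep(1)
print(timer.since_start())       # ~1.0, seconds since the timer started
time.sleep(1)
print(timer.since_last_check())  # ~1.0, seconds since the previous query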
......
@@ -237,7 +237,7 @@ def print_model_with_flops(model,
>>> model = ExampleModel()
>>> x = (3, 16, 16)
-to print the complexity inforamtion state for each layer, you can use
+to print the complexity information state for each layer, you can use
>>> get_model_complexity_info(model, x)
or directly use
>>> print_model_with_flops(model, 4579784.0, 37361)
......
@@ -98,7 +98,7 @@ class BaseInit(object):
def __init__(self, *, bias=0, bias_prob=None, layer=None):
self.wholemodule = False
if not isinstance(bias, (int, float)):
-raise TypeError(f'bias must be a numbel, but got a {type(bias)}')
+raise TypeError(f'bias must be a number, but got a {type(bias)}')
if bias_prob is not None:
if not isinstance(bias_prob, float):
......
@@ -28,13 +28,13 @@ def list_from_file(filename, prefix='', offset=0, max_num=0):
def dict_from_file(filename, key_type=str):
"""Load a text file and parse the content as a dict.
-Each line of the text file will be two or more columns splited by
+Each line of the text file will be two or more columns split by
whitespaces or tabs. The first column will be parsed as dict keys, and
the following columns will be parsed as dict values.
Args:
filename(str): Filename.
-key_type(type): Type of the dict's keys. str is user by default and
+key_type(type): Type of the dict keys. str is user by default and
type conversion will be performed if specified.
Returns:
......
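Editor's aside (not part of the commit): an illustration of the `dict_from_file` behaviour described above, using a hypothetical file.

```python
import mmcv

# Suppose mapping.txt contains the three lines:
#   1 cat
#   2 dog cow
#   3 panda
mapping = mmcv.dict_from_file('mapping.txt', key_type=int)
# -> {1: 'cat', 2: ['dog', 'cow'], 3: 'panda'}
```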
@@ -528,7 +528,7 @@ def _get_shear_matrix(magnitude, direction='horizontal'):
Args:
magnitude (int | float): The magnitude used for shear.
-direction (str): Thie flip direction, either "horizontal"
+direction (str): The flip direction, either "horizontal"
or "vertical".
Returns:
@@ -552,7 +552,7 @@ def imshear(img,
img (ndarray): Image to be sheared with format (h, w)
or (h, w, c).
magnitude (int | float): The magnitude used for shear.
-direction (str): Thie flip direction, either "horizontal"
+direction (str): The flip direction, either "horizontal"
or "vertical".
border_value (int | tuple[int]): Value used in case of a
constant border.
......
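Editor's aside (not part of the commit): a hedged sketch of calling the shear API documented above; the image path is hypothetical and the keyword names follow the docstring.

```python
import mmcv

img = mmcv.imread('demo.jpg')
# Shear horizontally by magnitude 0.3, padding new pixels with 0.
sheared = mmcv.imshear(img, magnitude=0.3, direction='horizontal',
                       border_value=0)
```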
@@ -140,7 +140,7 @@ def imread(img_or_path, flag='color', channel_order='bgr', backend=None):
it will be returned as is.
flag (str): Flags specifying the color type of a loaded image,
candidates are `color`, `grayscale` and `unchanged`.
-Note that the `turbojpeg` backened does not support `unchanged`.
+Note that the `turbojpeg` backend does not support `unchanged`.
channel_order (str): Order of channel, candidates are `bgr` and `rgb`.
backend (str | None): The image decoding backend type. Options are
`cv2`, `pillow`, `turbojpeg`, `tifffile`, `None`.
@@ -234,7 +234,7 @@ def imwrite(img, file_path, params=None, auto_mkdir=True):
Args:
img (ndarray): Image array to be written.
file_path (str): Image file path.
-params (None or list): Same as opencv's :func:`imwrite` interface.
+params (None or list): Same as opencv :func:`imwrite` interface.
auto_mkdir (bool): If the parent folder of `file_path` does not exist,
whether to create it automatically.
......
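Editor's aside (not part of the commit): a minimal round-trip through the two functions documented above; the paths are hypothetical.

```python
import mmcv

img = mmcv.imread('demo.jpg', flag='color', channel_order='bgr')
# auto_mkdir=True (the default) creates the parent folder if needed.
mmcv.imwrite(img, 'out/demo_copy.jpg')
```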
@@ -119,7 +119,7 @@ def adjust_color(img, alpha=1, beta=None, gamma=0):
beta = 1 - alpha
colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma)
if not colored_img.dtype == np.uint8:
-# Note when the dtype of `img` is not defaultly `np.uint8`
+# Note when the dtype of `img` is not the default `np.uint8`
# (e.g. np.float32), the value in `colored_img` got from cv2
# is not guaranteed to be in range [0, 255], so here clip
# is needed.
@@ -320,9 +320,9 @@ def adjust_sharpness(img, factor=1., kernel=None):
# adopted from PIL.ImageFilter.SMOOTH
kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13
assert isinstance(kernel, np.ndarray), \
-f'kernel must be of type np.ndarrray, but got {type(kernel)} instead.'
+f'kernel must be of type np.ndarray, but got {type(kernel)} instead.'
assert kernel.ndim == 2, \
-f'kernel must have a dimention of 2, but got {kernel.ndim} instead.'
+f'kernel must have a dimension of 2, but got {kernel.ndim} instead.'
degenerated = cv2.filter2D(img, -1, kernel)
sharpened_img = cv2.addWeighted(
@@ -340,13 +340,13 @@ def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True):
<https://dl.acm.org/doi/pdf/10.1145/3065386>`_.
Args:
-img (ndarray): Image to be ajusted lighting. BGR order.
+img (ndarray): Image to be adjusted lighting. BGR order.
eigval (ndarray): the eigenvalue of the convariance matrix of pixel
values, respectively.
eigvec (ndarray): the eigenvector of the convariance matrix of pixel
values, respectively.
alphastd (float): The standard deviation for distribution of alpha.
-Dafaults to 0.1
+Defaults to 0.1
to_rgb (bool): Whether to convert img to rgb.
Returns:
......
@@ -49,7 +49,7 @@ def bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False, offset=0):
mode_dict = {'iou': 0, 'iof': 1}
assert mode in mode_dict.keys()
mode_flag = mode_dict[mode]
-# Either the boxes are empty or the length of boxes's last dimenstion is 4
+# Either the boxes are empty or the length of boxes' last dimension is 4
assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
assert offset == 1 or offset == 0
......
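Editor's aside (not part of the commit): a hedged sketch of the op this code belongs to (`mmcv.ops.bbox_overlaps`); the box values are arbitrary and the op may require a CUDA-enabled mmcv build.

```python
import torch
from mmcv.ops import bbox_overlaps

bboxes1 = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]]).cuda()
bboxes2 = torch.tensor([[0., 0., 10., 10.]]).cuda()
# IoU matrix of shape (2, 1); boxes are in (x1, y1, x2, y2) format.
ious = bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False)
```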
@@ -195,15 +195,15 @@ class FusedBiasLeakyReLU(nn.Module):
The bias term comes from the convolution operation. In addition, to keep
the variance of the feature map or gradients unchanged, they also adopt a
-scale similarly with Kaiming initalization. However, since the
+scale similarly with Kaiming initialization. However, since the
:math:`1 + \alpha^2` : is too small, we can just ignore it. Therefore, the
-final sacle is just :math:`\sqrt{2}`:. Of course, you may change it with # noqa: W605, E501
+final scale is just :math:`\sqrt{2}`:. Of course, you may change it with # noqa: W605, E501
your own scale.
TODO: Implement the CPU version.
Args:
-channel (int): The channnel number of the feature map.
+channel (int): The channel number of the feature map.
negative_slope (float, optional): Same as nn.LeakyRelu.
Defaults to 0.2.
scale (float, optional): A scalar to adjust the variance of the feature
@@ -230,9 +230,9 @@ def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2**0.5):
The bias term comes from the convolution operation. In addition, to keep
the variance of the feature map or gradients unchanged, they also adopt a
-scale similarly with Kaiming initalization. However, since the
+scale similarly with Kaiming initialization. However, since the
:math:`1 + \alpha^2` : is too small, we can just ignore it. Therefore, the
-final sacle is just :math:`\sqrt{2}`:. Of course, you may change it with # noqa: W605, E501
+final scale is just :math:`\sqrt{2}`:. Of course, you may change it with # noqa: W605, E501
your own scale.
Args:
......
@@ -10,7 +10,7 @@ from ..cnn import ConvModule
class BaseMergeCell(nn.Module):
"""The basic class for cells used in NAS-FPN and NAS-FCOS.
-BaseMergeCell takes 2 inputs. After applying concolution
+BaseMergeCell takes 2 inputs. After applying convolution
on them, they are resized to the target size. Then,
they go through binary_op, which depends on the type of cell.
If with_out_conv is True, the result of output will go through
......
@@ -350,7 +350,7 @@ def nms_rotated(dets, scores, iou_threshold, labels=None):
be in (x_ctr, y_ctr, width, height, angle_radian) format.
scores (Tensor): scores in shape (N, ).
iou_threshold (float): IoU thresh for NMS.
-labels (Tensor): boxes's label in shape (N,).
+labels (Tensor): boxes' label in shape (N,).
Returns:
tuple: kept dets(boxes and scores) and indice, which is always the \
......
@@ -14,7 +14,7 @@ def pixel_group(score, mask, embedding, kernel_label, kernel_contour,
Arguments:
score (np.array or Tensor): The foreground score with size hxw.
mask (np.array or Tensor): The foreground mask with size hxw.
-embedding (np.array or Tensor): The emdedding with size hxwxc to
+embedding (np.array or Tensor): The embedding with size hxwxc to
distinguish instances.
kernel_label (np.array or Tensor): The instance kernel index with
size hxw.
......
@@ -23,7 +23,7 @@ def scatter(input, devices, streams=None):
with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
output = output.cuda(devices[0], non_blocking=True)
else:
-# unsquzee the first dimension thus the tensor's shape is the
+# unsqueeze the first dimension thus the tensor's shape is the
# same as those scattered with GPU.
output = output.unsqueeze(0)
return output
......
@@ -22,7 +22,7 @@ class BaseModule(nn.Module, metaclass=ABCMeta):
super(BaseModule, self).__init__()
# define default value of init_cfg instead of hard code
-# in init_weigt() function
+# in init_weight() function
self._is_init = False
self.init_cfg = init_cfg
......
@@ -361,7 +361,7 @@ class CyclicLrUpdaterHook(LrUpdaterHook):
Implement the cyclical learning rate policy (CLR) described in
https://arxiv.org/pdf/1506.01186.pdf
-Different from the original paper, we use cosine anealing rather than
+Different from the original paper, we use cosine annealing rather than
triangular policy inside a cycle. This improves the performance in the
3D detection area.
......
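Editor's aside (not part of the commit): a sketch of enabling this policy from a runner config; the field names follow mmcv's LR hook registry and the values are arbitrary.

```python
# Cyclic LR with cosine annealing inside each cycle (see the hook above).
lr_config = dict(
    policy='cyclic',
    by_epoch=False,
    target_ratio=(10, 1e-4),  # (highest, lowest) LR as ratios of the base LR
    cyclic_times=1,           # number of cycles over the whole schedule
    step_ratio_up=0.4)        # fraction of each cycle spent increasing the LR
```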
@@ -59,7 +59,7 @@ if TORCH_VERSION != 'parrots' and TORCH_VERSION >= '1.6.0':
It can also be a dict containing arguments of GradScalar.
Defaults to 512. For Pytorch >= 1.6, mmcv uses official
implementation of GradScaler. If you use a dict version of
-loss_scale to create GradScaler, plese refer to:
+loss_scale to create GradScaler, please refer to:
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
for the parameters.
......
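Editor's aside (not part of the commit): a hedged sketch of the dict form of `loss_scale` mentioned above; the keys are forwarded to `torch.cuda.amp.GradScaler` and the values are arbitrary.

```python
from mmcv.runner import Fp16OptimizerHook

optimizer_config = Fp16OptimizerHook(
    loss_scale=dict(init_scale=512., growth_interval=1000))
```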
@@ -10,7 +10,7 @@ from .hook import HOOKS, Hook
@HOOKS.register_module()
class ProfilerHook(Hook):
-"""Profiler to analyze perfromance during training.
+"""Profiler to analyze performance during training.
PyTorch Profiler is a tool that allows the collection of the performance
metrics during the training. More details on Profiler can be found at
@@ -67,7 +67,7 @@ class ProfilerHook(Hook):
from torch import profiler # torch version >= 1.8.1
except ImportError:
raise ImportError('profiler is the new feature of torch1.8.1, '
-f'but your verison is {torch.__version__}')
+f'but your version is {torch.__version__}')
assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.'
self.by_epoch = by_epoch
@@ -120,10 +120,10 @@ class ProfilerHook(Hook):
trace_type = trace_cfg.pop('type') # log_trace handler
if trace_type == 'log_trace':
-def _log_hanlder(prof):
+def _log_handler(prof):
print(prof.key_averages().table(**trace_cfg))
-_on_trace_ready = _log_hanlder
+_on_trace_ready = _log_handler
elif trace_type == 'tb_trace': # tensorboard_trace handler
try:
import torch_tb_profiler # noqa: F401
......
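Editor's aside (not part of the commit): the `log_trace` branch shown above suggests a hook config roughly like the sketch below; the `on_trace_ready` argument name and the extra keys are assumptions based on the visible code.

```python
# Register the hook; remaining keys in the trace dict are passed to
# prof.key_averages().table(**trace_cfg) by the log_trace handler above.
custom_hooks = [
    dict(type='ProfilerHook',
         on_trace_ready=dict(type='log_trace', sort_by='self_cpu_time_total'))
]
```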
@@ -51,7 +51,7 @@ class DefaultOptimizerConstructor:
``dcn_offset_lr_mult``. If you wish to apply both of them to the
offset layer in deformable convs, set ``dcn_offset_lr_mult``
to the original ``dcn_offset_lr_mult`` * ``bias_lr_mult``.
-2. If the option ``dcn_offset_lr_mult`` is used, the construtor will
+2. If the option ``dcn_offset_lr_mult`` is used, the constructor will
apply it to all the DCN layers in the model. So be carefull when
the model contains multiple DCN layers in places other than
backbone.
......
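Editor's aside (not part of the commit): a sketch of the options discussed above as they would appear in an optimizer config; the values are arbitrary and the keys follow `DefaultOptimizerConstructor`'s `paramwise_cfg`.

```python
optimizer = dict(
    type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001,
    # bias_lr_mult scales the LR of all bias parameters; dcn_offset_lr_mult
    # scales the LR of DCN offset layers (see the caveats above).
    paramwise_cfg=dict(bias_lr_mult=2., dcn_offset_lr_mult=0.1))
```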