Unverified Commit 63b7aa31 authored by Kai Chen's avatar Kai Chen Committed by GitHub
Browse files

Fix docstring formats (#383)

* update doc formats

* update docstring
parent a47451b4
......@@ -28,6 +28,11 @@ repos:
args: ["--remove"]
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://github.com/myint/docformatter
rev: v1.3.1
hooks:
- id: docformatter
args: ["--in-place", "--wrap-descriptions", "79"]
- repo: local
hooks:
- id: clang-format
......
......@@ -18,7 +18,7 @@ from mmcv.runner import DistSamplerSeedHook, Runner
def accuracy(output, target, topk=(1, )):
"""Computes the precision@k for the specified values of k"""
"""Computes the precision@k for the specified values of k."""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
......
......@@ -2,8 +2,8 @@
from .alexnet import AlexNet
from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS,
ContextBlock, ConvModule, GeneralizedAttention,
NonLocal1d, NonLocal2d, NonLocal3d, Scale,
ContextBlock, ConvModule, GeneralizedAttention, HSigmoid,
HSwish, NonLocal1d, NonLocal2d, NonLocal3d, Scale,
build_activation_layer, build_conv_layer,
build_norm_layer, build_padding_layer, build_plugin_layer,
build_upsample_layer, is_norm)
......@@ -20,7 +20,7 @@ __all__ = [
'build_activation_layer', 'build_conv_layer', 'build_norm_layer',
'build_padding_layer', 'build_upsample_layer', 'build_plugin_layer',
'is_norm', 'NonLocal1d', 'NonLocal2d', 'NonLocal3d', 'ContextBlock',
'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS',
'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale',
'get_model_complexity_info'
'HSigmoid', 'HSwish', 'GeneralizedAttention', 'ACTIVATION_LAYERS',
'CONV_LAYERS', 'NORM_LAYERS', 'PADDING_LAYERS', 'UPSAMPLE_LAYERS',
'PLUGIN_LAYERS', 'Scale', 'get_model_complexity_info'
]
......@@ -32,12 +32,13 @@ class GeneralizedAttention(nn.Module):
Default: 1.
attention_type (str): A binary indicator string for indicating which
items in generalized empirical_attention module are used.
'1000' indicates 'query and key content' (appr - appr) item,
'0100' indicates 'query content and relative position'
(appr - position) item,
'0010' indicates 'key content only' (bias - appr) item,
'0001' indicates 'relative position only' (bias - position) item.
Default: '1111'.
- '1000' indicates 'query and key content' (appr - appr) item,
- '0100' indicates 'query content and relative position'
(appr - position) item,
- '0010' indicates 'key content only' (bias - appr) item,
- '0001' indicates 'relative position only' (bias - position) item.
"""
_abbr_ = 'gen_attention_block'
......
......@@ -5,7 +5,11 @@ from .registry import ACTIVATION_LAYERS
@ACTIVATION_LAYERS.register_module()
class HSwish(nn.Module):
"""Hard Swish Module. Apply the hard swish function:
"""Hard Swish Module.
This module applies the hard swish function:
.. math::
Hswish(x) = x * ReLU6(x + 3) / 6
Args:
......
......@@ -73,6 +73,7 @@ def build_norm_layer(cfg, num_features, postfix=''):
Args:
cfg (dict): The norm layer config, which should contain:
- type (str): Layer type.
- layer args: Args needed to instantiate a norm layer.
- requires_grad (bool, optional): Whether stop gradient updates.
......@@ -81,10 +82,9 @@ def build_norm_layer(cfg, num_features, postfix=''):
to create named layer.
Returns:
tuple[str, nn.Module]:
name (str): The layer name consisting of abbreviation and postfix,
e.g., bn1, gn.
layer (nn.Module): Created norm layer.
(str, nn.Module): The first element is the layer name consisting of
abbreviation and postfix, e.g., bn1, gn. The second element is the
created norm layer.
"""
if not isinstance(cfg, dict):
raise TypeError('cfg must be a dict')
......
......@@ -31,7 +31,6 @@ def infer_abbr(class_type):
>>> camel2snack("FancyBlock")
'fancy_block'
"""
word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
......
......@@ -52,14 +52,15 @@ def build_upsample_layer(cfg, *args, **kwargs):
Args:
cfg (dict): The upsample layer config, which should contain:
- type (str): Layer type.
- scale_factor (int): Upsample ratio, which is not applicable to
deconv.
- layer args: Args needed to instantiate a upsample layer.
args (argument list): Arguments passed to the `__init__`
method of the corresponding conv layer.
kwargs (keyword arguments): Keyword arguments passed to the `__init__`
args (argument list): Arguments passed to the ``__init__``
method of the corresponding conv layer.
kwargs (keyword arguments): Keyword arguments passed to the
``__init__`` method of the corresponding conv layer.
Returns:
nn.Module: Created upsample layer.
......
......@@ -9,7 +9,7 @@ from .utils import constant_init, kaiming_init
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
"""3x3 convolution with padding."""
return nn.Conv2d(
in_planes,
out_planes,
......@@ -75,8 +75,8 @@ class Bottleneck(nn.Module):
with_cp=False):
"""Bottleneck block.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
if it is "caffe", the stride-two layer is the first 1x1 conv layer.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__()
assert style in ['pytorch', 'caffe']
......
......@@ -45,18 +45,19 @@ def get_model_complexity_info(model,
each layer in a model.
Supported layers are listed as below:
- Convolutions: `nn.Conv1d`, `nn.Conv2d`, `nn.Conv3d`.
- Activations: `nn.ReLU`, `nn.PReLU`, `nn.ELU`, `nn.LeakyReLU`,
`nn.ReLU6`.
- Poolings: `nn.MaxPool1d`, `nn.MaxPool2d`, `nn.MaxPool3d`,
`nn.AvgPool1d`, `nn.AvgPool2d`, `nn.AvgPool3d`,
`nn.AdaptiveMaxPool1d`, `nn.AdaptiveMaxPool2d`,
`nn.AdaptiveMaxPool3d`, `nn.AdaptiveAvgPool1d`,
`nn.AdaptiveAvgPool2d`, `nn.AdaptiveAvgPool3d`.
- BatchNorms: `nn.BatchNorm1d`, `nn.BatchNorm2d`, `nn.BatchNorm3d`.
- Linear: `nn.Linear`.
- Deconvolution: `nn.ConvTranspose2d`.
- Upsample: `nn.Upsample`.
- Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
- Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
``nn.ReLU6``.
- Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
- BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
``nn.BatchNorm3d``.
- Linear: ``nn.Linear``.
- Deconvolution: ``nn.ConvTranspose2d``.
- Upsample: ``nn.Upsample``.
Args:
model (nn.Module): The model for complexity calculation.
......@@ -69,11 +70,11 @@ def get_model_complexity_info(model,
method that generates input. otherwise, it will generate a random
tensor with input shape to calculate FLOPs. Default: None.
flush (bool): same as that in :func:`print`. Default: False.
ost (stream): same as `file` param in :func:`print`.
ost (stream): same as ``file`` param in :func:`print`.
Default: sys.stdout.
Returns:
tuple[float | str]: If `as_strings` is set to True, it will return
tuple[float | str]: If ``as_strings`` is set to True, it will return
FLOPs and parameter counts in a string format. otherwise, it will
return those in a float number format.
"""
......@@ -352,7 +353,7 @@ def start_flops_count(self):
"""Activate the computation of mean flops consumption per image.
A method to activate the computation of mean flops consumption per image.
which will be available after `add_flops_counting_methods()` is called on
which will be available after ``add_flops_counting_methods()`` is called on
a desired net object. It should be called before running the network.
"""
add_batch_counter_hook_function(self)
......@@ -374,9 +375,9 @@ def start_flops_count(self):
def stop_flops_count(self):
"""Stop computing the mean flops consumption per image.
A method to stop computing the mean flops consumption per image, which
will be available after `add_flops_counting_methods()` is called on a
desired net object. It can be called to pause the computation whenever.
A method to stop computing the mean flops consumption per image, which will
be available after ``add_flops_counting_methods()`` is called on a desired
net object. It can be called to pause the computation whenever.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function)
......@@ -385,8 +386,8 @@ def stop_flops_count(self):
def reset_flops_count(self):
"""Reset statistics computed so far.
A method to Reset computed statistics, which will be available
after `add_flops_counting_methods()` is called on a desired net object.
A method to Reset computed statistics, which will be available after
`add_flops_counting_methods()` is called on a desired net object.
"""
add_batch_counter_variables_or_reset(self)
self.apply(add_flops_counter_variable_or_reset)
......
......@@ -61,6 +61,6 @@ def caffe2_xavier_init(module, bias=0):
def bias_init_with_prob(prior_prob):
    """Initialize conv/fc bias value according to a given probability.

    Computes the bias ``b`` such that ``sigmoid(b) == prior_prob``, i.e.
    ``b = -log((1 - prior_prob) / prior_prob)``. Commonly used to bias a
    classification head toward a desired prior probability of positive
    predictions at the start of training.

    Args:
        prior_prob (float): Desired prior probability in the open
            interval (0, 1).

    Returns:
        float: The bias value to assign to the conv/fc layer.
    """
    # float() unwraps the 0-d numpy scalar returned by np.log.
    bias_init = float(-np.log((1 - prior_prob) / prior_prob))
    return bias_init
......@@ -8,7 +8,7 @@ from .utils import constant_init, kaiming_init, normal_init
def conv3x3(in_planes, out_planes, dilation=1):
"""3x3 convolution with padding"""
"""3x3 convolution with padding."""
return nn.Conv2d(
in_planes,
out_planes,
......
......@@ -6,8 +6,8 @@ from abc import ABCMeta, abstractmethod
class BaseStorageBackend(metaclass=ABCMeta):
"""Abstract class of storage backends.
All backends need to implement two apis: `get()` and `get_text()`.
`get()` reads the file as a byte stream and `get_text()` reads the file
All backends need to implement two apis: ``get()`` and ``get_text()``.
``get()`` reads the file as a byte stream and ``get_text()`` reads the file
as texts.
"""
......@@ -25,8 +25,8 @@ class CephBackend(BaseStorageBackend):
Args:
path_mapping (dict|None): path mapping dict from local path to Petrel
path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
be replaced by `dst`. Default: None.
path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath``
will be replaced by ``dst``. Default: None.
"""
def __init__(self, path_mapping=None):
......
......@@ -336,6 +336,7 @@ def impad(img,
areas when padding_mode is 'constant'. Default: 0.
padding_mode (str): Type of padding. Should be: constant, edge,
reflect or symmetric. Default: constant.
- constant: pads with a constant value, this value is specified
with pad_val.
- edge: pads with the last value at the edge of the image.
......@@ -370,8 +371,8 @@ def impad(img,
elif isinstance(padding, numbers.Number):
padding = (padding, padding, padding, padding)
else:
raise ValueError("Padding must be a int or a 2, or 4 element tuple."
f"But received {padding}")
raise ValueError('Padding must be a int or a 2, or 4 element tuple.'
f'But received {padding}')
# check padding mode
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
......
......@@ -70,7 +70,7 @@ def _jpegflag(flag='color', channel_order='bgr'):
def _pillow2array(img, flag='color', channel_order='bgr'):
"""Convert a pillow image to numpy array
"""Convert a pillow image to numpy array.
Args:
img (:obj:`PIL.Image.Image`): The image loaded using PIL
......@@ -215,7 +215,7 @@ def imfrombytes(content, flag='color', channel_order='bgr', backend=None):
def imwrite(img, file_path, params=None, auto_mkdir=True):
"""Write image to file
"""Write image to file.
Args:
img (ndarray): Image array to be written.
......
......@@ -9,7 +9,7 @@ except ImportError:
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
"""Convert tensor to 3-channel images
"""Convert tensor to 3-channel images.
Args:
tensor (torch.Tensor): Tensor that contains multiple images, shape (
......
......@@ -53,7 +53,7 @@ def imdenormalize(img, mean, std, to_bgr=True):
def iminvert(img):
"""Invert (negate) an image
"""Invert (negate) an image.
Args:
img (ndarray): Image to be inverted.
......
......@@ -240,11 +240,12 @@ class DeformConv2dPack(DeformConv2d):
The offset tensor is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`.
The spatial arrangement is like:
```
.. code:: text
(x0, y0) (x1, y1) (x2, y2)
(x3, y3) (x4, y4) (x5, y5)
(x6, y6) (x7, y7) (x8, y8)
```
Args:
in_channels (int): Same as nn.Conv2d.
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment