OpenDAS / MMCV / Commits / 222f3807

Unverified commit 222f3807, authored Dec 09, 2021 by Zaida Zhou, committed by GitHub on Dec 09, 2021.

[Docs] Fix the format of the docstring (#1573)

* [Docs] Fix the format of docstring
* fix format

parent 53c1b2fe
Showing 20 changed files with 135 additions and 137 deletions.
mmcv/cnn/bricks/activation.py (+1, -0)
mmcv/cnn/bricks/norm.py (+3, -3)
mmcv/cnn/bricks/plugin.py (+5, -5)
mmcv/cnn/bricks/upsample.py (+1, -1)
mmcv/cnn/utils/flops_counter.py (+10, -10)
mmcv/fileio/file_client.py (+1, -1)
mmcv/image/colorspace.py (+4, -4)
mmcv/image/geometric.py (+12, -12)
mmcv/ops/group_points.py (+1, -1)
mmcv/ops/nms.py (+5, -5)
mmcv/ops/scatter_points.py (+12, -15)
mmcv/runner/base_module.py (+6, -7)
mmcv/runner/base_runner.py (+5, -5)
mmcv/runner/hooks/ema.py (+2, -2)
mmcv/runner/hooks/logger/neptune.py (+14, -12)
mmcv/utils/config.py (+2, -2)
mmcv/utils/path.py (+1, -1)
mmcv/utils/registry.py (+5, -5)
mmcv/utils/timer.py (+36, -36)
mmcv/video/io.py (+9, -10)
mmcv/cnn/bricks/activation.py

...
@@ -83,6 +83,7 @@ def build_activation_layer(cfg):
     Args:
         cfg (dict): The activation layer config, which should contain:
             - type (str): Layer type.
            - layer args: Args needed to instantiate an activation layer.
...
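For context, the cfg format documented above maps directly onto a layer instantiation. A minimal sketch, assuming mmcv is installed:

```python
# Minimal usage sketch of the cfg format described above (not part of the commit).
from mmcv.cnn import build_activation_layer

# 'type' selects the registered layer; the remaining keys are passed to its __init__.
act = build_activation_layer(dict(type='ReLU', inplace=True))
print(act)  # ReLU(inplace=True)
```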
mmcv/cnn/bricks/norm.py

...
@@ -83,9 +83,9 @@ def build_norm_layer(cfg, num_features, postfix=''):
         to create named layer.
     Returns:
-        (str, nn.Module): The first element is the layer name consisting of
-            abbreviation and postfix, e.g., bn1, gn. The second element is the
-            created norm layer.
+        tuple[str, nn.Module]: The first element is the layer name consisting
+            of abbreviation and postfix, e.g., bn1, gn. The second element is the
+            created norm layer.
     """
     if not isinstance(cfg, dict):
         raise TypeError('cfg must be a dict')
...
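A short sketch of the (name, layer) return value documented above, assuming mmcv is installed:

```python
# Usage sketch for the Returns section above (not part of the commit).
from mmcv.cnn import build_norm_layer

name, layer = build_norm_layer(dict(type='BN'), num_features=64)
print(name)   # 'bn' -- abbreviation plus (empty) postfix
print(layer)  # BatchNorm2d(64, ...)
```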
mmcv/cnn/bricks/plugin.py

...
@@ -57,15 +57,15 @@ def build_plugin_layer(cfg, postfix='', **kwargs):
     Args:
         cfg (None or dict): cfg should contain:
-            type (str): identify plugin layer type.
-            layer args: args needed to instantiate a plugin layer.
+            - type (str): identify plugin layer type.
+            - layer args: args needed to instantiate a plugin layer.
         postfix (int, str): appended into norm abbreviation to
             create named layer. Default: ''.
     Returns:
-        tuple[str, nn.Module]:
-            name (str): abbreviation + postfix
-            layer (nn.Module): created plugin layer
+        tuple[str, nn.Module]: The first one is the concatenation of
+            abbreviation and postfix. The second is the created plugin layer.
     """
     if not isinstance(cfg, dict):
         raise TypeError('cfg must be a dict')
...
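A usage sketch of the return value described above. ``ContextBlock`` is used here only as an example plugin; its registration and argument names (``in_channels``, ``ratio``) are assumptions about the installed mmcv version:

```python
# Sketch only (not part of the commit): assumes mmcv is installed and that the
# 'ContextBlock' plugin with in_channels/ratio arguments is registered.
from mmcv.cnn import build_plugin_layer

name, layer = build_plugin_layer(dict(type='ContextBlock', in_channels=16, ratio=0.25))
print(name)   # abbreviation + postfix, e.g. 'context_block'
print(layer)  # the created plugin layer (an nn.Module)
```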
mmcv/cnn/bricks/upsample.py

...
@@ -55,7 +55,7 @@ def build_upsample_layer(cfg, *args, **kwargs):
         - type (str): Layer type.
         - scale_factor (int): Upsample ratio, which is not applicable to
           deconv.
         - layer args: Args needed to instantiate a upsample layer.
     args (argument list): Arguments passed to the ``__init__``
         method of the corresponding conv layer.
...
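A sketch of the cfg format above, assuming mmcv is installed; for the ``nearest``/``bilinear`` types the builder wraps ``nn.Upsample``, so ``scale_factor`` is forwarded, while ``deconv`` builds a ``ConvTranspose2d`` and does not take it:

```python
# Usage sketch (not part of the commit); assumes mmcv's default upsample registry.
from mmcv.cnn import build_upsample_layer

up = build_upsample_layer(dict(type='nearest', scale_factor=2))
print(up)  # Upsample(scale_factor=2.0, mode='nearest')
```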
mmcv/cnn/utils/flops_counter.py

...
@@ -48,16 +48,16 @@ def get_model_complexity_info(model,
     Supported layers are listed as below:
         - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
         - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
          ``nn.ReLU6``.
         - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
          ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
          ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
          ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
          ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
         - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
          ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``,
          ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``.
         - Linear: ``nn.Linear``.
         - Deconvolution: ``nn.ConvTranspose2d``.
         - Upsample: ``nn.Upsample``.
...
@@ -78,8 +78,8 @@ def get_model_complexity_info(model,
     Returns:
         tuple[float | str]: If ``as_strings`` is set to True, it will return
             FLOPs and parameter counts in a string format. otherwise, it will
             return those in a float number format.
     """
     assert type(input_shape) is tuple
     assert len(input_shape) >= 1
...
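A sketch of the Returns behaviour above, assuming mmcv and torchvision are installed (the printed numbers are illustrative, not guaranteed):

```python
# Usage sketch (not part of the commit).
import torchvision
from mmcv.cnn import get_model_complexity_info

model = torchvision.models.resnet18()
flops, params = get_model_complexity_info(
    model, input_shape=(3, 224, 224), print_per_layer_stat=False, as_strings=True)
# With as_strings=True both values come back as formatted strings.
print(flops, params)
```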
mmcv/fileio/file_client.py

...
@@ -1072,7 +1072,7 @@ class FileClient:
     Returns:
         bool: Return ``True`` if ``filepath`` points to a file, ``False``
             otherwise.
     """
     return self.client.isfile(filepath)
...
mmcv/image/colorspace.py

...
@@ -160,7 +160,7 @@ def rgb2ycbcr(img, y_only=False):
     Returns:
         ndarray: The converted YCbCr image. The output image has the same type
             and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img)
...
@@ -194,7 +194,7 @@ def bgr2ycbcr(img, y_only=False):
     Returns:
         ndarray: The converted YCbCr image. The output image has the same type
             and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img)
...
@@ -227,7 +227,7 @@ def ycbcr2rgb(img):
     Returns:
         ndarray: The converted RGB image. The output image has the same type
             and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img) * 255
...
@@ -259,7 +259,7 @@ def ycbcr2bgr(img):
     Returns:
         ndarray: The converted BGR image. The output image has the same type
             and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img) * 255
...
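A sketch of the "same type and range" behaviour documented above, assuming mmcv is installed:

```python
# Usage sketch (not part of the commit).
import numpy as np
import mmcv

img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)  # RGB image in [0, 255]
ycbcr = mmcv.rgb2ycbcr(img)
print(ycbcr.dtype, ycbcr.shape)  # uint8, (32, 32, 3): same type and range as the input

y = mmcv.rgb2ycbcr(img, y_only=True)  # only the Y channel
print(y.shape)  # (32, 32)
```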
mmcv/image/geometric.py

...
@@ -70,7 +70,7 @@ def imresize(img,
     Returns:
         tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
             `resized_img`.
     """
     h, w = img.shape[:2]
     if backend is None:
...
@@ -130,7 +130,7 @@ def imresize_to_multiple(img,
     Returns:
         tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
             `resized_img`.
     """
     h, w = img.shape[:2]
     if size is not None and scale_factor is not None:
...
@@ -175,7 +175,7 @@ def imresize_like(img,
     Returns:
         tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
             `resized_img`.
     """
     h, w = dst_img.shape[:2]
     return imresize(img, (w, h), return_scale, interpolation, backend=backend)
...
@@ -462,16 +462,16 @@ def impad(img,
         reflect or symmetric. Default: constant.
         - constant: pads with a constant value, this value is specified
           with pad_val.
         - edge: pads with the last value at the edge of the image.
         - reflect: pads with reflection of image without repeating the last
           value on the edge. For example, padding [1, 2, 3, 4] with 2
           elements on both sides in reflect mode will result in
           [3, 2, 1, 2, 3, 4, 3, 2].
         - symmetric: pads with reflection of image repeating the last value
           on the edge. For example, padding [1, 2, 3, 4] with 2 elements on
           both sides in symmetric mode will result in
           [2, 1, 1, 2, 3, 4, 4, 3]
     Returns:
         ndarray: The padded image.
...
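A sketch of the padding modes described above, assuming mmcv is installed:

```python
# Usage sketch (not part of the commit).
import numpy as np
import mmcv

img = np.arange(12, dtype=np.uint8).reshape(3, 4)
# Pad 1 pixel on every side; 'reflect' mirrors the border without repeating it.
padded = mmcv.impad(img, padding=1, padding_mode='reflect')
print(padded.shape)  # (5, 6)
```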
mmcv/ops/group_points.py

...
@@ -76,7 +76,7 @@ class QueryAndGroup(nn.Module):
     Returns:
         torch.Tensor: (B, 3 + C, npoint, sample_num) Grouped
             concatenated coordinates and features of points.
     """
     # if self.max_radius is None, we will perform kNN instead of ball query
     # idx is of shape [B, npoint, sample_num]
...
mmcv/ops/nms.py

...
@@ -277,11 +277,11 @@ def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
         - iou_thr (float): IoU threshold used for NMS.
         - split_thr (float): threshold number of boxes. In some cases the
           number of boxes is large (e.g., 200k). To avoid OOM during
           training, the users could set `split_thr` to a small value.
           If the number of boxes is greater than the threshold, it will
           perform NMS on each group of boxes separately and sequentially.
           Defaults to 10000.
     class_agnostic (bool): if true, nms is class agnostic,
         i.e. IoU thresholding happens over all boxes,
         regardless of the predicted class.
...
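A sketch of how ``nms_cfg`` is typically passed. This assumes mmcv is installed with its compiled ops (mmcv-full); note the exact IoU key name (``iou_thr`` in the docstring above vs ``iou_threshold`` in recent releases) depends on the mmcv version:

```python
# Usage sketch (not part of the commit).
import torch
from mmcv.ops import batched_nms

boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [20., 20., 30., 30.]])
scores = torch.tensor([0.9, 0.8, 0.7])
idxs = torch.tensor([0, 0, 1])  # class index of each box
dets, keep = batched_nms(boxes, scores, idxs,
                         dict(type='nms', iou_threshold=0.5, split_thr=10000))
print(dets.shape, keep)  # kept boxes with scores appended, and their indices
```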
mmcv/ops/scatter_points.py

...
@@ -25,11 +25,10 @@ class _DynamicScatter(Function):
         'mean'. Default: 'max'.
     Returns:
-        tuple[torch.Tensor]: tuple[torch.Tensor]: A tuple contains two
-            elements. The first one is the voxel features with shape [M, C]
-            which are respectively reduced from input features that share
-            the same voxel coordinates . The second is voxel coordinates
-            with shape [M, ndim].
+        tuple[torch.Tensor]: A tuple contains two elements. The first one
+            is the voxel features with shape [M, C] which are respectively
+            reduced from input features that share the same voxel coordinates.
+            The second is voxel coordinates with shape [M, ndim].
     """
     results = ext_module.dynamic_point_to_voxel_forward(
         feats, coors, reduce_type)
...
@@ -89,11 +88,10 @@ class DynamicScatter(nn.Module):
         multi-dim voxel index) of each points.
     Returns:
-        tuple[torch.Tensor]: tuple[torch.Tensor]: A tuple contains two
-            elements. The first one is the voxel features with shape [M, C]
-            which are respectively reduced from input features that share
-            the same voxel coordinates . The second is voxel coordinates
-            with shape [M, ndim].
+        tuple[torch.Tensor]: A tuple contains two elements. The first one
+            is the voxel features with shape [M, C] which are respectively
+            reduced from input features that share the same voxel coordinates.
+            The second is voxel coordinates with shape [M, ndim].
     """
     reduce = 'mean' if self.average_points else 'max'
     return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce)
...
@@ -107,11 +105,10 @@ class DynamicScatter(nn.Module):
         multi-dim voxel index) of each points.
     Returns:
-        tuple[torch.Tensor]:tuple[torch.Tensor]: A tuple contains two
-            elements. The first one is the voxel features with shape [M, C]
-            which are respectively reduced from input features that share
-            the same voxel coordinates . The second is voxel coordinates
-            with shape [M, ndim].
+        tuple[torch.Tensor]: A tuple contains two elements. The first one
+            is the voxel features with shape [M, C] which are respectively
+            reduced from input features that share the same voxel coordinates.
+            The second is voxel coordinates with shape [M, ndim].
     """
     if coors.size(-1) == 3:
         return self.forward_single(points, coors)
...
mmcv/runner/base_module.py

...
@@ -18,13 +18,12 @@ class BaseModule(nn.Module, metaclass=ABCMeta):
     functionality of parameter initialization. Compared with
     ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.
         - ``init_cfg``: the config to control the initialization.
-        - ``init_weights``: The function of parameter
-          initialization and recording initialization
-          information.
-        - ``_params_init_info``: Used to track the parameter
-          initialization information. This attribute only
-          exists during executing the ``init_weights``.
+        - ``init_weights``: The function of parameter initialization and recording
+          initialization information.
+        - ``_params_init_info``: Used to track the parameter initialization
+          information. This attribute only exists during executing the
+          ``init_weights``.
     Args:
         init_cfg (dict, optional): Initialization config dict.
...
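A sketch of the ``init_cfg``/``init_weights`` pattern described above, assuming mmcv is installed; the ``Kaiming`` init config is just one common choice:

```python
# Usage sketch (not part of the commit).
import torch.nn as nn
from mmcv.runner import BaseModule

class TinyNet(BaseModule):
    def __init__(self, init_cfg=dict(type='Kaiming', layer='Conv2d')):
        super().__init__(init_cfg=init_cfg)  # init_cfg controls the initialization
        self.conv = nn.Conv2d(3, 8, 3)

model = TinyNet()
model.init_weights()  # applies init_cfg and records initialization information
```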
mmcv/runner/base_runner.py

...
@@ -207,8 +207,8 @@ class BaseRunner(metaclass=ABCMeta):
     Returns:
         list[float] | dict[str, list[float]]: Current learning rates of all
             param groups. If the runner has a dict of optimizers, this method
             will return a dict.
     """
     if isinstance(self.optimizer, torch.optim.Optimizer):
         lr = [group['lr'] for group in self.optimizer.param_groups]
...
@@ -226,8 +226,8 @@ class BaseRunner(metaclass=ABCMeta):
     Returns:
         list[float] | dict[str, list[float]]: Current momentums of all
             param groups. If the runner has a dict of optimizers, this method
             will return a dict.
     """
     def _get_momentum(optimizer):
...
@@ -287,7 +287,7 @@ class BaseRunner(metaclass=ABCMeta):
     hook_cfg (dict): Hook config. It should have at least keys 'type'
         and 'priority' indicating its type and priority.
-    Notes:
+    Note:
         The specific hook class to register should not use 'type' and
         'priority' arguments during initialization.
     """
...
mmcv/runner/hooks/ema.py

...
@@ -13,8 +13,8 @@ class EMAHook(Hook):
     .. math::
-        \text{Xema\_{t+1}} = (1 - \text{momentum}) \times
-        \text{Xema\_{t}} + \text{momentum} \times X_t
+        Xema\_{t+1} = (1 - \text{momentum}) \times
+        Xema\_{t} + \text{momentum} \times X_t
     Args:
         momentum (float): The momentum used for updating ema parameter.
...
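For reference, the update rule in the docstring above written out in plain Python (an illustration only, not the EMAHook implementation):

```python
# Illustration of the EMA update rule (not part of the commit).
def ema_update(x_ema, x, momentum=0.0002):
    # X_ema_{t+1} = (1 - momentum) * X_ema_t + momentum * X_t
    return (1 - momentum) * x_ema + momentum * x
```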
mmcv/runner/hooks/logger/neptune.py

...
@@ -12,19 +12,21 @@ class NeptuneLoggerHook(LoggerHook):
     Args:
         init_kwargs (dict): a dict contains the initialization keys as below:
-            - project (str): Name of a project in a form of
-              namespace/project_name. If None, the value of
-              NEPTUNE_PROJECT environment variable will be taken.
-            - api_token (str): User’s API token.
-              If None, the value of NEPTUNE_API_TOKEN environment
-              variable will be taken. Note: It is strongly recommended
-              to use NEPTUNE_API_TOKEN environment variable rather than
-              placing your API token in plain text in your source code.
-            - name (str, optional, default is 'Untitled'): Editable name of
-              the run. Name is displayed in the run's Details and in
-              Runs table as a column.
-            Check https://docs.neptune.ai/api-reference/neptune#init for
-            more init arguments.
+            - project (str): Name of a project in a form of
+              namespace/project_name. If None, the value of NEPTUNE_PROJECT
+              environment variable will be taken.
+            - api_token (str): User’s API token. If None, the value of
+              NEPTUNE_API_TOKEN environment variable will be taken. Note: It is
+              strongly recommended to use NEPTUNE_API_TOKEN environment
+              variable rather than placing your API token in plain text in your
+              source code.
+            - name (str, optional, default is 'Untitled'): Editable name of the
+              run. Name is displayed in the run's Details and in Runs table as
+              a column.
+            Check https://docs.neptune.ai/api-reference/neptune#init for more
+            init arguments.
     interval (int): Logging interval (every k iterations).
     ignore_last (bool): Ignore the log of last iterations in each epoch
         if less than `interval`.
...
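A sketch of how such a hook is typically enabled from a runner config's ``log_config``; the project name is a placeholder and this assumes the neptune client is installed:

```python
# Config sketch (not part of the commit).
log_config = dict(
    interval=50,
    hooks=[
        dict(type='NeptuneLoggerHook',
             init_kwargs=dict(project='my-workspace/my-project',
                              name='baseline-run')),
    ])
```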
mmcv/utils/config.py

...
@@ -344,7 +344,7 @@ class Config:
         config str. Only py/yml/yaml/json type are supported now!
     Returns:
-        obj:`Config`: Config obj.
+        :obj:`Config`: Config obj.
     """
     if file_format not in ['.py', '.json', '.yaml', '.yml']:
         raise IOError('Only py/yml/yaml/json type are supported now!')
...
@@ -561,7 +561,7 @@ class Config:
         >>> assert cfg_dict == dict(
         ...     model=dict(backbone=dict(depth=50, with_cp=True)))
-        # Merge list element
+        >>> # Merge list element
         >>> cfg = Config(dict(pipeline=[
         ...     dict(type='LoadImage'), dict(type='LoadAnnotations')]))
         >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})
...
mmcv/utils/path.py

...
@@ -40,7 +40,7 @@ def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
     """Scan a directory to find the interested files.
     Args:
-        dir_path (str | obj:`Path`): Path of the directory.
+        dir_path (str | :obj:`Path`): Path of the directory.
         suffix (str | tuple(str), optional): File suffix that we are
             interested in. Default: None.
         recursive (bool, optional): If set to True, recursively scan the
...
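A short usage sketch of ``scandir``, assuming mmcv is installed; yielded paths are relative to ``dir_path``:

```python
# Usage sketch (not part of the commit).
import mmcv

for path in mmcv.scandir('mmcv/utils', suffix='.py', recursive=True):
    print(path)
```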
mmcv/utils/registry.py

...
@@ -59,6 +59,7 @@ class Registry:
     """A registry to map strings to classes.
     Registered object could be built from registry.
     Example:
         >>> MODELS = Registry('models')
         >>> @MODELS.register_module()
...
@@ -128,16 +129,15 @@ class Registry:
     The name of the package where registry is defined will be returned.
     Example:
-        # in mmdet/models/backbone/resnet.py
+        >>> # in mmdet/models/backbone/resnet.py
         >>> MODELS = Registry('models')
         >>> @MODELS.register_module()
         >>> class ResNet:
         >>>     pass
     The scope of ``ResNet`` will be ``mmdet``.
     Returns:
-        scope (str): The inferred scope name.
+        str: The inferred scope name.
     """
     # inspect.stack() trace where this function is called, the index-2
     # indicates the frame where `infer_scope()` is called
...
@@ -158,8 +158,8 @@ class Registry:
         None, 'ResNet'
     Return:
-        scope (str, None): The first scope.
-        key (str): The remaining key.
+        tuple[str | None, str]: The former element is the first scope of
+            the key, which can be ``None``. The latter is the remaining key.
     """
     split_index = key.find('.')
     if split_index != -1:
...
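A sketch of the return value described above, assuming mmcv is installed; ``split_scope_key`` is a static method of ``Registry``:

```python
# Usage sketch (not part of the commit).
from mmcv.utils import Registry

print(Registry.split_scope_key('mmdet.ResNet'))  # ('mmdet', 'ResNet')
print(Registry.split_scope_key('ResNet'))        # (None, 'ResNet')
```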
mmcv/utils/timer.py

...
@@ -12,27 +12,26 @@ class TimerError(Exception):
 class Timer:
     """A flexible Timer class.
-    :Example:
+    Examples:
         >>> import time
         >>> import mmcv
         >>> with mmcv.Timer():
         >>>     # simulate a code block that will run for 1s
         >>>     time.sleep(1)
         1.000
         >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
         >>>     # simulate a code block that will run for 1s
         >>>     time.sleep(1)
         it takes 1.0 seconds
         >>> timer = mmcv.Timer()
         >>> time.sleep(0.5)
         >>> print(timer.since_start())
         0.500
         >>> time.sleep(0.5)
         >>> print(timer.since_last_check())
         0.500
         >>> print(timer.since_start())
         1.000
     """

     def __init__(self, start=True, print_tmpl=None):
...
@@ -64,7 +63,8 @@ class Timer:
     def since_start(self):
         """Total time since the timer is started.
-        Returns (float): Time in seconds.
+        Returns:
+            float: Time in seconds.
         """
         if not self._is_running:
             raise TimerError('timer is not running')
...
@@ -77,7 +77,8 @@ class Timer:
         Either :func:`since_start` or :func:`since_last_check` is a checking
         operation.
-        Returns (float): Time in seconds.
+        Returns:
+            float: Time in seconds.
         """
         if not self._is_running:
             raise TimerError('timer is not running')
...
@@ -95,21 +96,20 @@ def check_time(timer_id):
     This method is suitable for running a task on a list of items. A timer will
     be registered when the method is called for the first time.
-    :Example:
+    Examples:
         >>> import time
         >>> import mmcv
         >>> for i in range(1, 6):
         >>>     # simulate a code block
         >>>     time.sleep(i)
         >>>     mmcv.check_time('task1')
         2.000
         3.000
         4.000
         5.000
     Args:
-        timer_id (str): Timer identifier.
+        str: Timer identifier.
     """
     if timer_id not in _g_timers:
         _g_timers[timer_id] = Timer()
...
mmcv/video/io.py

...
@@ -50,15 +50,14 @@ class VideoReader:
     the second time, there is no need to decode again if it is stored in the
     cache.
-    :Example:
+    Examples:
         >>> import mmcv
         >>> v = mmcv.VideoReader('sample.mp4')
         >>> len(v)  # get the total frame number with `len()`
         120
         >>> for img in v:  # v is iterable
         >>>     mmcv.imshow(img)
         >>> v[5]  # get the 6th frame
     """

     def __init__(self, filename, cache_capacity=10):
...
@@ -189,7 +188,7 @@ class VideoReader:
     Returns:
         ndarray or None: If the video is fresh, return None, otherwise
             return the frame.
     """
     if self._position == 0:
         return None
...