OpenDAS / MMCV · Commit 222f3807 (unverified)

Authored by Zaida Zhou on Dec 09, 2021; committed by GitHub on Dec 09, 2021.

[Docs] Fix the format of the docstring (#1573)

* [Docs] Fix the format of docstring
* fix format

Parent: 53c1b2fe

Changes: 21 files · showing 20 changed files with 135 additions and 137 deletions (+135, −137)
mmcv/cnn/bricks/activation.py        +1   -0
mmcv/cnn/bricks/norm.py              +3   -3
mmcv/cnn/bricks/plugin.py            +5   -5
mmcv/cnn/bricks/upsample.py          +1   -1
mmcv/cnn/utils/flops_counter.py      +10  -10
mmcv/fileio/file_client.py           +1   -1
mmcv/image/colorspace.py             +4   -4
mmcv/image/geometric.py              +12  -12
mmcv/ops/group_points.py             +1   -1
mmcv/ops/nms.py                      +5   -5
mmcv/ops/scatter_points.py           +12  -15
mmcv/runner/base_module.py           +6   -7
mmcv/runner/base_runner.py           +5   -5
mmcv/runner/hooks/ema.py             +2   -2
mmcv/runner/hooks/logger/neptune.py  +14  -12
mmcv/utils/config.py                 +2   -2
mmcv/utils/path.py                   +1   -1
mmcv/utils/registry.py               +5   -5
mmcv/utils/timer.py                  +36  -36
mmcv/video/io.py                     +9   -10
mmcv/cnn/bricks/activation.py
@@ -83,6 +83,7 @@ def build_activation_layer(cfg):
     Args:
         cfg (dict): The activation layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate an activation layer.
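As context for the docstring above, a minimal usage sketch of the config-driven builder (``build_activation_layer`` is exported from ``mmcv.cnn``; the cfg values are illustrative):

import torch
from mmcv.cnn import build_activation_layer

# The cfg dict carries the layer type plus any constructor arguments.
act = build_activation_layer(dict(type='ReLU', inplace=True))
print(act)                              # ReLU(inplace=True)
print(act(torch.tensor([-1.0, 2.0])))   # tensor([0., 2.])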
mmcv/cnn/bricks/norm.py
@@ -83,9 +83,9 @@ def build_norm_layer(cfg, num_features, postfix=''):
             to create named layer.

     Returns:
-        (str, nn.Module): The first element is the layer name consisting of
-            abbreviation and postfix, e.g., bn1, gn. The second element is the
-            created norm layer.
+        tuple[str, nn.Module]: The first element is the layer name consisting of
+            abbreviation and postfix, e.g., bn1, gn. The second element is the
+            created norm layer.
     """
     if not isinstance(cfg, dict):
         raise TypeError('cfg must be a dict')
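A short sketch of how the documented return value is consumed (illustrative values; ``build_norm_layer`` comes from ``mmcv.cnn``):

import torch.nn as nn
from mmcv.cnn import build_norm_layer

# Returns (name, layer); the name is the abbreviation plus postfix, e.g. 'bn1'.
name, layer = build_norm_layer(dict(type='BN'), num_features=64, postfix=1)
print(name)                                # 'bn1'
print(isinstance(layer, nn.BatchNorm2d))   # True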
mmcv/cnn/bricks/plugin.py
@@ -57,15 +57,15 @@ def build_plugin_layer(cfg, postfix='', **kwargs):
     Args:
         cfg (None or dict): cfg should contain:
-            type (str): identify plugin layer type.
-            layer args: args needed to instantiate a plugin layer.
+            - type (str): identify plugin layer type.
+            - layer args: args needed to instantiate a plugin layer.
         postfix (int, str): appended into norm abbreviation to
             create named layer. Default: ''.

     Returns:
-        tuple[str, nn.Module]:
-            name (str): abbreviation + postfix
-            layer (nn.Module): created plugin layer
+        tuple[str, nn.Module]: The first one is the concatenation of
+            abbreviation and postfix. The second is the created plugin layer.
     """
     if not isinstance(cfg, dict):
         raise TypeError('cfg must be a dict')
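A usage sketch, assuming the ``ContextBlock`` plugin that mmcv registers in its plugin registry; the name shown in the comment reflects the abbreviation-plus-postfix return value described above:

from mmcv.cnn import build_plugin_layer

# 'ContextBlock' is a plugin layer registered in mmcv.cnn; the remaining
# cfg keys are forwarded to its constructor.
name, layer = build_plugin_layer(
    dict(type='ContextBlock', in_channels=16, ratio=0.25), postfix='_1')
print(name)    # 'context_block_1'
print(layer)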
mmcv/cnn/bricks/upsample.py
@@ -55,7 +55,7 @@ def build_upsample_layer(cfg, *args, **kwargs):
             - type (str): Layer type.
             - scale_factor (int): Upsample ratio, which is not applicable to
               deconv.
             - layer args: Args needed to instantiate a upsample layer.
         args (argument list): Arguments passed to the ``__init__``
             method of the corresponding conv layer.
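A brief sketch of the two common cfg types, 'nearest' and 'deconv' (channel sizes are illustrative):

from mmcv.cnn import build_upsample_layer

# type='nearest' maps to nn.Upsample; type='deconv' maps to nn.ConvTranspose2d.
up = build_upsample_layer(dict(type='nearest', scale_factor=2))
deconv = build_upsample_layer(
    dict(type='deconv', in_channels=8, out_channels=8, kernel_size=2, stride=2))
print(up)
print(deconv)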
mmcv/cnn/utils/flops_counter.py
@@ -48,16 +48,16 @@ def get_model_complexity_info(model,
     Supported layers are listed as below:
         - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
         - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
          ``nn.ReLU6``.
         - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
          ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
          ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
          ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
          ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
         - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
          ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``,
          ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``.
         - Linear: ``nn.Linear``.
         - Deconvolution: ``nn.ConvTranspose2d``.
         - Upsample: ``nn.Upsample``.

@@ -78,8 +78,8 @@ def get_model_complexity_info(model,
     Returns:
         tuple[float | str]: If ``as_strings`` is set to True, it will return
             FLOPs and parameter counts in a string format. otherwise, it will
             return those in a float number format.
     """
     assert type(input_shape) is tuple
     assert len(input_shape) >= 1
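A small, self-contained sketch of the documented call (the toy model is illustrative; shapes must be consistent because a dummy forward pass is run):

import torch.nn as nn
from mmcv.cnn import get_model_complexity_info

model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.ReLU(inplace=True),
    nn.AdaptiveAvgPool2d(1),
)
# input_shape excludes the batch dimension; strings are returned by default.
flops, params = get_model_complexity_info(model, (3, 224, 224), as_strings=True)
print(flops, params)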
mmcv/fileio/file_client.py
@@ -1072,7 +1072,7 @@ class FileClient:
         Returns:
             bool: Return ``True`` if ``filepath`` points to a file, ``False``
                 otherwise.
         """
         return self.client.isfile(filepath)
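A minimal sketch of the documented ``isfile`` call, assuming the default disk backend:

from mmcv.fileio import FileClient

# The disk backend checks the local filesystem; isfile() delegates to it.
client = FileClient(backend='disk')
print(client.isfile(__file__))          # True when run from a script on disk
print(client.isfile('/no/such/path'))   # False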
mmcv/image/colorspace.py
@@ -160,7 +160,7 @@ def rgb2ycbcr(img, y_only=False):
     Returns:
         ndarray: The converted YCbCr image. The output image has the same type
             and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img)

@@ -194,7 +194,7 @@ def bgr2ycbcr(img, y_only=False):
     Returns:
         ndarray: The converted YCbCr image. The output image has the same type
             and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img)

@@ -227,7 +227,7 @@ def ycbcr2rgb(img):
     Returns:
         ndarray: The converted RGB image. The output image has the same type
             and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img) * 255

@@ -259,7 +259,7 @@ def ycbcr2bgr(img):
     Returns:
         ndarray: The converted BGR image. The output image has the same type
             and range as input image.
     """
     img_type = img.dtype
     img = _convert_input_type_range(img) * 255
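A short sketch exercising ``rgb2ycbcr`` as documented (random data, purely illustrative):

import numpy as np
import mmcv

# uint8 input stays uint8; float32 input in [0, 1] stays float32 in [0, 1].
rgb = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
ycbcr = mmcv.rgb2ycbcr(rgb)
luma = mmcv.rgb2ycbcr(rgb, y_only=True)
print(ycbcr.dtype, ycbcr.shape, luma.shape)   # uint8 (4, 4, 3) (4, 4)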
mmcv/image/geometric.py
@@ -70,7 +70,7 @@ def imresize(img,
     Returns:
         tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
             `resized_img`.
     """
     h, w = img.shape[:2]
     if backend is None:

@@ -130,7 +130,7 @@ def imresize_to_multiple(img,
     Returns:
         tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
             `resized_img`.
     """
     h, w = img.shape[:2]
     if size is not None and scale_factor is not None:

@@ -175,7 +175,7 @@ def imresize_like(img,
     Returns:
         tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
             `resized_img`.
     """
     h, w = dst_img.shape[:2]
     return imresize(img, (w, h), return_scale, interpolation, backend=backend)

@@ -462,16 +462,16 @@ def impad(img,
             reflect or symmetric. Default: constant.

             - constant: pads with a constant value, this value is specified
               with pad_val.
             - edge: pads with the last value at the edge of the image.
             - reflect: pads with reflection of image without repeating the last
               value on the edge. For example, padding [1, 2, 3, 4] with 2
               elements on both sides in reflect mode will result in
               [3, 2, 1, 2, 3, 4, 3, 2].
             - symmetric: pads with reflection of image repeating the last value
               on the edge. For example, padding [1, 2, 3, 4] with 2 elements on
               both sides in symmetric mode will result in
               [2, 1, 1, 2, 3, 4, 4, 3]

     Returns:
         ndarray: The padded image.
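A sketch of ``imresize`` with ``return_scale=True`` and ``impad`` in the 'reflect' mode described above (sizes are illustrative):

import numpy as np
import mmcv

img = np.zeros((100, 200, 3), dtype=np.uint8)

# imresize takes the target size as (width, height).
resized, w_scale, h_scale = mmcv.imresize(img, (400, 300), return_scale=True)
print(resized.shape, w_scale, h_scale)   # (300, 400, 3) 2.0 3.0

# impad with explicit (left, top, right, bottom) padding and 'reflect' mode.
padded = mmcv.impad(img, padding=(2, 2, 2, 2), padding_mode='reflect')
print(padded.shape)                      # (104, 204, 3)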
mmcv/ops/group_points.py
@@ -76,7 +76,7 @@ class QueryAndGroup(nn.Module):
         Returns:
             torch.Tensor: (B, 3 + C, npoint, sample_num) Grouped
                 concatenated coordinates and features of points.
         """
         # if self.max_radius is None, we will perform kNN instead of ball query
         # idx is of shape [B, npoint, sample_num]
mmcv/ops/nms.py
@@ -277,11 +277,11 @@ def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
             - iou_thr (float): IoU threshold used for NMS.
             - split_thr (float): threshold number of boxes. In some cases the
               number of boxes is large (e.g., 200k). To avoid OOM during
               training, the users could set `split_thr` to a small value.
               If the number of boxes is greater than the threshold, it will
               perform NMS on each group of boxes separately and sequentially.
               Defaults to 10000.
         class_agnostic (bool): if true, nms is class agnostic,
             i.e. IoU thresholding happens over all boxes,
             regardless of the predicted class.
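A small sketch of ``batched_nms`` (illustrative boxes; this assumes mmcv is installed with its compiled ops, and passes the threshold as ``iou_threshold``, the key the underlying nms op accepts, rather than the ``iou_thr`` wording in the docstring):

import torch
from mmcv.ops import batched_nms

boxes = torch.tensor([[0., 0., 10., 10.],
                      [0., 0., 10., 10.],
                      [20., 20., 30., 30.]])
scores = torch.tensor([0.9, 0.8, 0.7])
idxs = torch.tensor([0, 0, 1])   # class index of each box

dets, keep = batched_nms(boxes, scores, idxs,
                         dict(type='nms', iou_threshold=0.5))
print(dets.shape, keep)          # dets rows are [x1, y1, x2, y2, score]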
mmcv/ops/scatter_points.py
@@ -25,11 +25,10 @@ class _DynamicScatter(Function):
                 'mean'. Default: 'max'.

         Returns:
-            tuple[torch.Tensor]: tuple[torch.Tensor]: A tuple contains two
-                elements. The first one is the voxel features with shape [M, C]
-                which are respectively reduced from input features that share
-                the same voxel coordinates . The second is voxel coordinates
-                with shape [M, ndim].
+            tuple[torch.Tensor]: A tuple contains two elements. The first one
+                is the voxel features with shape [M, C] which are respectively
+                reduced from input features that share the same voxel coordinates.
+                The second is voxel coordinates with shape [M, ndim].
         """
         results = ext_module.dynamic_point_to_voxel_forward(
             feats, coors, reduce_type)

@@ -89,11 +88,10 @@ class DynamicScatter(nn.Module):
                 multi-dim voxel index) of each points.

         Returns:
-            tuple[torch.Tensor]: tuple[torch.Tensor]: A tuple contains two
-                elements. The first one is the voxel features with shape [M, C]
-                which are respectively reduced from input features that share
-                the same voxel coordinates . The second is voxel coordinates
-                with shape [M, ndim].
+            tuple[torch.Tensor]: A tuple contains two elements. The first one
+                is the voxel features with shape [M, C] which are respectively
+                reduced from input features that share the same voxel coordinates.
+                The second is voxel coordinates with shape [M, ndim].
         """
         reduce = 'mean' if self.average_points else 'max'
         return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce)

@@ -107,11 +105,10 @@ class DynamicScatter(nn.Module):
                 multi-dim voxel index) of each points.

         Returns:
-            tuple[torch.Tensor]:tuple[torch.Tensor]: A tuple contains two
-                elements. The first one is the voxel features with shape [M, C]
-                which are respectively reduced from input features that share
-                the same voxel coordinates . The second is voxel coordinates
-                with shape [M, ndim].
+            tuple[torch.Tensor]: A tuple contains two elements. The first one
+                is the voxel features with shape [M, C] which are respectively
+                reduced from input features that share the same voxel coordinates.
+                The second is voxel coordinates with shape [M, ndim].
         """
         if coors.size(-1) == 3:
             return self.forward_single(points, coors)
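A usage sketch of ``DynamicScatter`` matching the documented shapes; this assumes mmcv was built with CUDA support, and the voxel size and range values are placeholders:

import torch
from mmcv.ops import DynamicScatter

# Scatter per-point features into voxels; average_points=True means 'mean' reduce.
scatter = DynamicScatter(voxel_size=[0.1, 0.1, 0.1],
                         point_cloud_range=[0, 0, 0, 1, 1, 1],
                         average_points=True)
points = torch.rand(1000, 4).cuda()                      # per-point features [N, C]
coors = torch.randint(0, 10, (1000, 3)).int().cuda()     # voxel index of each point
voxel_feats, voxel_coors = scatter(points, coors)
print(voxel_feats.shape, voxel_coors.shape)              # [M, C], [M, 3]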
mmcv/runner/base_module.py
@@ -18,13 +18,12 @@ class BaseModule(nn.Module, metaclass=ABCMeta):
     functionality of parameter initialization. Compared with
     ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.

-        - ``init_cfg``: the config to control the initialization.
-        - ``init_weights``: The function of parameter
-          initialization and recording initialization
-          information.
-        - ``_params_init_info``: Used to track the parameter
-          initialization information. This attribute only
-          exists during executing the ``init_weights``.
+        - ``init_cfg``: the config to control the initialization.
+        - ``init_weights``: The function of parameter initialization and recording
+          initialization information.
+        - ``_params_init_info``: Used to track the parameter initialization
+          information. This attribute only exists during executing the
+          ``init_weights``.

     Args:
         init_cfg (dict, optional): Initialization config dict.
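A minimal sketch of subclassing ``BaseModule`` with an ``init_cfg`` (the layer and init type are illustrative):

import torch.nn as nn
from mmcv.runner import BaseModule

class TinyHead(BaseModule):
    def __init__(self, init_cfg=dict(type='Normal', layer='Conv2d', std=0.01)):
        super().__init__(init_cfg=init_cfg)
        self.conv = nn.Conv2d(8, 2, kernel_size=1)

head = TinyHead()
head.init_weights()   # applies init_cfg and records initialization info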
mmcv/runner/base_runner.py
@@ -207,8 +207,8 @@ class BaseRunner(metaclass=ABCMeta):
         Returns:
             list[float] | dict[str, list[float]]: Current learning rates of all
                 param groups. If the runner has a dict of optimizers, this
                 method will return a dict.
         """
         if isinstance(self.optimizer, torch.optim.Optimizer):
             lr = [group['lr'] for group in self.optimizer.param_groups]

@@ -226,8 +226,8 @@ class BaseRunner(metaclass=ABCMeta):
         Returns:
             list[float] | dict[str, list[float]]: Current momentums of all
                 param groups. If the runner has a dict of optimizers, this
                 method will return a dict.
         """
         def _get_momentum(optimizer):

@@ -287,7 +287,7 @@ class BaseRunner(metaclass=ABCMeta):
             hook_cfg (dict): Hook config. It should have at least keys 'type'
                 and 'priority' indicating its type and priority.

-        Notes:
+        Note:
             The specific hook class to register should not use 'type' and
             'priority' arguments during initialization.
         """
mmcv/runner/hooks/ema.py
@@ -13,8 +13,8 @@ class EMAHook(Hook):
     .. math::

-        \text{Xema\_{t+1}} = (1 - \text{momentum}) \times
-            \text{Xema\_{t}} + \text{momentum} \times X_t
+        Xema\_{t+1} = (1 - \text{momentum}) \times
+            Xema\_{t} + \text{momentum} \times X_t

     Args:
         momentum (float): The momentum used for updating ema parameter.
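A plain-Python illustration of the update rule above, with made-up numbers (not the hook's actual implementation):

# EMA update from the formula: x_ema <- (1 - momentum) * x_ema + momentum * x
momentum = 0.0002
x_ema, x = 1.0, 0.0
for _ in range(3):
    x_ema = (1 - momentum) * x_ema + momentum * x
print(round(x_ema, 6))   # 0.9994, i.e. x_ema tracks x very slowly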
mmcv/runner/hooks/logger/neptune.py
@@ -12,19 +12,21 @@ class NeptuneLoggerHook(LoggerHook):
     Args:
         init_kwargs (dict): a dict contains the initialization keys as below:

             - project (str): Name of a project in a form of
-              namespace/project_name. If None, the value of
-              NEPTUNE_PROJECT environment variable will be taken.
-            - api_token (str): User’s API token.
-              If None, the value of NEPTUNE_API_TOKEN environment
-              variable will be taken. Note: It is strongly recommended
-              to use NEPTUNE_API_TOKEN environment variable rather than
-              placing your API token in plain text in your source code.
-            - name (str, optional, default is 'Untitled'): Editable name of
-              the run. Name is displayed in the run's Details and in
-              Runs table as a column.
-            Check https://docs.neptune.ai/api-reference/neptune#init for
-            more init arguments.
+              namespace/project_name. If None, the value of NEPTUNE_PROJECT
+              environment variable will be taken.
+            - api_token (str): User’s API token. If None, the value of
+              NEPTUNE_API_TOKEN environment variable will be taken. Note: It is
+              strongly recommended to use NEPTUNE_API_TOKEN environment
+              variable rather than placing your API token in plain text in your
+              source code.
+            - name (str, optional, default is 'Untitled'): Editable name of the
+              run. Name is displayed in the run's Details and in Runs table as
+              a column.
+
+            Check https://docs.neptune.ai/api-reference/neptune#init for more
+            init arguments.
         interval (int): Logging interval (every k iterations).
         ignore_last (bool): Ignore the log of last iterations in each epoch
             if less than `interval`.
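How this hook is typically wired into an mmcv-style ``log_config`` (the project and run names below are placeholders):

# Typical log_config entry in an mmcv-style config file.
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='NeptuneLoggerHook',
             init_kwargs=dict(project='my-workspace/my-project',  # placeholder
                              name='baseline-run')),
    ])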
mmcv/utils/config.py
@@ -344,7 +344,7 @@ class Config:
                 config str. Only py/yml/yaml/json type are supported now!

         Returns:
-            obj:`Config`: Config obj.
+            :obj:`Config`: Config obj.
         """
         if file_format not in ['.py', '.json', '.yaml', '.yml']:
             raise IOError('Only py/yml/yaml/json type are supported now!')

@@ -561,7 +561,7 @@ class Config:
             >>> assert cfg_dict == dict(
             ...     model=dict(backbone=dict(depth=50, with_cp=True)))
-            # Merge list element
+            >>> # Merge list element
             >>> cfg = Config(dict(pipeline=[
             ...     dict(type='LoadImage'), dict(type='LoadAnnotations')]))
             >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})
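A short sketch of ``Config.fromstring``, the method this hunk documents (the config string is illustrative):

from mmcv import Config

# Build a Config directly from a Python-syntax string.
cfg = Config.fromstring('lr = 0.01\noptimizer = dict(type="SGD", lr=lr)',
                        file_format='.py')
print(cfg.optimizer)   # {'type': 'SGD', 'lr': 0.01}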
mmcv/utils/path.py
@@ -40,7 +40,7 @@ def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
     """Scan a directory to find the interested files.

     Args:
-        dir_path (str | obj:`Path`): Path of the directory.
+        dir_path (str | :obj:`Path`): Path of the directory.
         suffix (str | tuple(str), optional): File suffix that we are
             interested in. Default: None.
         recursive (bool, optional): If set to True, recursively scan the
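A minimal sketch of ``scandir`` as documented (the suffixes are illustrative):

import mmcv

# scandir yields paths relative to dir_path; filter by suffix, optionally recurse.
for path in mmcv.scandir('.', suffix=('.py', '.yaml'), recursive=True):
    print(path)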
mmcv/utils/registry.py
@@ -59,6 +59,7 @@ class Registry:
     """A registry to map strings to classes.

     Registered object could be built from registry.

     Example:
         >>> MODELS = Registry('models')
         >>> @MODELS.register_module()

@@ -128,16 +129,15 @@ class Registry:
         The name of the package where registry is defined will be returned.

         Example:
-            # in mmdet/models/backbone/resnet.py
+            >>> # in mmdet/models/backbone/resnet.py
             >>> MODELS = Registry('models')
             >>> @MODELS.register_module()
             >>> class ResNet:
             >>>     pass
             The scope of ``ResNet`` will be ``mmdet``.

         Returns:
-            scope (str): The inferred scope name.
+            str: The inferred scope name.
         """
         # inspect.stack() trace where this function is called, the index-2
         # indicates the frame where `infer_scope()` is called

@@ -158,8 +158,8 @@ class Registry:
             None, 'ResNet'

         Return:
-            scope (str, None): The first scope.
-            key (str): The remaining key.
+            tuple[str | None, str]: The former element is the first scope
+                of the key, which can be ``None``. The latter is the
+                remaining key.
         """
         split_index = key.find('.')
         if split_index != -1:
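A compact sketch tying the documented pieces together: registering a class, building it from a cfg dict, and the ``split_scope_key`` helper (the ``ResNet`` stub is illustrative):

from mmcv.utils import Registry

MODELS = Registry('models')

@MODELS.register_module()
class ResNet:
    def __init__(self, depth=50):
        self.depth = depth

# Build from a config dict; the remaining keys go to the constructor.
model = MODELS.build(dict(type='ResNet', depth=18))
print(type(model).__name__, model.depth)          # ResNet 18

# The static helper documented above splits a scoped key.
print(Registry.split_scope_key('mmdet.ResNet'))   # ('mmdet', 'ResNet')
print(Registry.split_scope_key('ResNet'))         # (None, 'ResNet')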
mmcv/utils/timer.py
@@ -12,27 +12,26 @@ class TimerError(Exception):

 class Timer:
     """A flexible Timer class.

-    :Example:
+    Examples:
         >>> import time
         >>> import mmcv
         >>> with mmcv.Timer():
         >>>     # simulate a code block that will run for 1s
         >>>     time.sleep(1)
         1.000
         >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
         >>>     # simulate a code block that will run for 1s
         >>>     time.sleep(1)
         it takes 1.0 seconds
         >>> timer = mmcv.Timer()
         >>> time.sleep(0.5)
         >>> print(timer.since_start())
         0.500
         >>> time.sleep(0.5)
         >>> print(timer.since_last_check())
         0.500
         >>> print(timer.since_start())
         1.000
     """

     def __init__(self, start=True, print_tmpl=None):

@@ -64,7 +63,8 @@ class Timer:
     def since_start(self):
         """Total time since the timer is started.

-        Returns (float): Time in seconds.
+        Returns:
+            float: Time in seconds.
         """
         if not self._is_running:
             raise TimerError('timer is not running')

@@ -77,7 +77,8 @@ class Timer:
         Either :func:`since_start` or :func:`since_last_check` is a checking
         operation.

-        Returns (float): Time in seconds.
+        Returns:
+            float: Time in seconds.
         """
         if not self._is_running:
             raise TimerError('timer is not running')

@@ -95,21 +96,20 @@ def check_time(timer_id):
     This method is suitable for running a task on a list of items. A timer will
     be registered when the method is called for the first time.

-    :Example:
+    Examples:
         >>> import time
         >>> import mmcv
         >>> for i in range(1, 6):
         >>>     # simulate a code block
         >>>     time.sleep(i)
         >>>     mmcv.check_time('task1')
         2.000
         3.000
         4.000
         5.000

     Args:
-        timer_id (str): Timer identifier.
+        str: Timer identifier.
     """
     if timer_id not in _g_timers:
         _g_timers[timer_id] = Timer()
mmcv/video/io.py
@@ -50,15 +50,14 @@ class VideoReader:
     the second time, there is no need to decode again if it is stored in the
     cache.

-    :Example:
+    Examples:
         >>> import mmcv
         >>> v = mmcv.VideoReader('sample.mp4')
         >>> len(v)  # get the total frame number with `len()`
         120
         >>> for img in v:  # v is iterable
         >>>     mmcv.imshow(img)
         >>> v[5]  # get the 6th frame
     """

     def __init__(self, filename, cache_capacity=10):

@@ -189,7 +188,7 @@ class VideoReader:
         Returns:
             ndarray or None: If the video is fresh, return None, otherwise
                 return the frame.
         """
         if self._position == 0:
             return None