Commit 49261dbb authored by zhangwenwei

Merge branch 'pipeline_docstrings' into 'master'

update pipeline docstrings

See merge request open-mmlab/mmdet.3d!127
parents 260b5b4a d6249fe6
@@ -5,18 +5,16 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Batch mode in loading Scannet scenes with vertices and ground truth labels
-for semantic and instance segmentations
+for semantic and instance segmentations.
Usage example: python ./batch_load_scannet_data.py
"""
import argparse
import datetime
-import os
-import os.path as osp
import numpy as np
+import os
from load_scannet_data import export
+from os import path as osp
SCANNET_DIR = 'scans'
DONOTCARE_CLASS_IDS = np.array([])
...
@@ -4,16 +4,13 @@
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-"""Load Scannet scenes with vertices and ground truth labels
-for semantic and instance segmentations
-"""
+"""Load Scannet scenes with vertices and ground truth labels for semantic and
+instance segmentations."""
import argparse
import inspect
import json
-import os
import numpy as np
+import os
import scannet_utils
currentdir = os.path.dirname(
...
@@ -4,7 +4,7 @@
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-''' Helper class and functions for loading SUN RGB-D objects
+"""Helper class and functions for loading SUN RGB-D objects.
Author: Charles R. Qi
Date: December, 2018
@@ -12,14 +12,12 @@ Date: December, 2018
Note: removed unused code for frustum preparation.
Changed a way for data visualization (removed depdency on mayavi).
Load depth with scipy.io
-'''
+"""
import argparse
-import os
-import sys
import numpy as np
+import os
import sunrgbd_utils
+import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
@@ -32,8 +30,7 @@ DEFAULT_TYPE_WHITELIST = [
def random_sampling(pc, num_sample, replace=None, return_choices=False):
-    """ Input is NxC, output is num_samplexC
-    """
+    """Input is NxC, output is num_samplexC."""
    if replace is None:
        replace = (pc.shape[0] < num_sample)
    choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
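For readers skimming the diff, a standalone NumPy sketch of what a sampler like this does (the function name and demo array below are illustrative, not part of the repository):

import numpy as np

def random_sampling_demo(pc, num_sample, replace=None, return_choices=False):
    # Sample `num_sample` rows from an (N, C) point cloud; sample with
    # replacement automatically when fewer than `num_sample` points exist.
    if replace is None:
        replace = (pc.shape[0] < num_sample)
    choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
    return (pc[choices], choices) if return_choices else pc[choices]

pc = np.random.rand(100, 4)           # 100 points with (x, y, z, reflectance)
sampled = random_sampling_demo(pc, 20)
print(sampled.shape)                  # (20, 4)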
@@ -106,10 +103,9 @@ def extract_sunrgbd_data(idx_filename,
                         save_votes=False,
                         use_v1=False,
                         skip_empty_scene=True):
-    """Extract scene point clouds and
-    bounding boxes (centroids, box sizes, heading angles,
-    semantic classes). Dumped point clouds and boxes are in
-    upright depth coord.
+    """Extract scene point clouds and bounding boxes (centroids, box sizes,
+    heading angles, semantic classes). Dumped point clouds and boxes are in
+    upright depth coord.
    Args:
        idx_filename: a TXT file where each line is an int number (index)
...
@@ -13,10 +13,9 @@ Updated by Charles R. Qi
Date: December, 2018
Note: removed basis loading.
"""
import cv2
import numpy as np
-import scipy.io as sio
+from scipy import io as sio
type2class = {
    'bed': 0,
@@ -81,7 +80,7 @@ class SUNObject3d(object):
class SUNRGBD_Calibration(object):
-    """Calibration matrices and utils
+    """Calibration matrices and utils.
    We define five coordinate system in SUN RGBD dataset:
@@ -254,8 +253,8 @@ def my_compute_box_3d(center, size, heading_angle):
def compute_box_3d(obj, calib):
-    """Takes an object and a projection matrix (P) and projects the 3d
-    bounding box into the image plane.
+    """Takes an object and a projection matrix (P) and projects the 3d bounding
+    box into the image plane.
    Args:
        obj (SUNObject3d): Instance of SUNObject3d.
...
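As background for compute_box_3d, projecting 3D corners through a 3x4 projection matrix follows the usual homogeneous-coordinate recipe; a self-contained sketch with made-up intrinsics and corners:

import numpy as np

def project_to_image(pts_3d, P):
    # pts_3d: (n, 3) points in the camera frame; P: (3, 4) projection matrix.
    n = pts_3d.shape[0]
    pts_hom = np.hstack([pts_3d, np.ones((n, 1))])   # (n, 4) homogeneous points
    pts_2d = pts_hom @ P.T                           # (n, 3)
    return pts_2d[:, :2] / pts_2d[:, 2:3]            # divide by depth

P = np.array([[700., 0., 320., 0.],
              [0., 700., 240., 0.],
              [0., 0., 1., 0.]])
corners = np.random.rand(8, 3) + np.array([0., 0., 5.])  # 8 corners in front of the camera
print(project_to_image(corners, P).shape)                # (8, 2)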
@@ -286,6 +286,6 @@ class Custom3DDataset(Dataset):
        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0. In 3D datasets, they are all the same, thus are all
-        zeros
+        zeros.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
@@ -10,6 +10,13 @@ warnings.filterwarnings('ignore', category=NumbaPerformanceWarning)
@numba.njit
def _rotation_box2d_jit_(corners, angle, rot_mat_T):
+    """Rotate 2D boxes.
+
+    Args:
+        corners (np.ndarray): Corners of boxes.
+        angle (float): Rotation angle.
+        rot_mat_T (np.ndarray): Transposed rotation matrix.
+    """
    rot_sin = np.sin(angle)
    rot_cos = np.cos(angle)
    rot_mat_T[0, 0] = rot_cos
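A plain-NumPy illustration of this kind of in-place 2D rotation (simplified, no numba; the sign convention below is one common choice and may differ from the repository's):

import numpy as np

def rotate_corners_2d(corners, angle):
    # corners: (N, 4, 2) box corners, rotated about the origin by `angle`.
    rot_sin, rot_cos = np.sin(angle), np.cos(angle)
    # Transposed rotation matrix, so row-vector corners can be right-multiplied.
    rot_mat_T = np.array([[rot_cos, rot_sin],
                          [-rot_sin, rot_cos]])
    return corners @ rot_mat_T

corners = np.array([[[1., 0.], [0., 1.], [-1., 0.], [0., -1.]]])
print(rotate_corners_2d(corners, np.pi / 2).round(3))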
@@ -21,6 +28,14 @@ def _rotation_box2d_jit_(corners, angle, rot_mat_T):
@numba.jit(nopython=True)
def box_collision_test(boxes, qboxes, clockwise=True):
+    """Box collision test.
+
+    Args:
+        boxes (np.ndarray): Corners of current boxes.
+        qboxes (np.ndarray): Boxes to avoid colliding with.
+        clockwise (bool): Whether the corners are in clockwise order.
+            Default: True.
+    """
    N = boxes.shape[0]
    K = qboxes.shape[0]
    ret = np.zeros((N, K), dtype=np.bool_)
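To illustrate what the returned (N, K) matrix means, here is a deliberately simplified stand-in that only checks axis-aligned overlap (the real function tests rotated corner boxes edge by edge):

import numpy as np

def aabb_collision_matrix(boxes, qboxes):
    # boxes: (N, 4) as [xmin, ymin, xmax, ymax]; qboxes: (K, 4).
    # Returns an (N, K) boolean matrix, True where box i overlaps qbox j.
    N, K = boxes.shape[0], qboxes.shape[0]
    ret = np.zeros((N, K), dtype=np.bool_)
    for i in range(N):
        for j in range(K):
            overlap_x = min(boxes[i, 2], qboxes[j, 2]) > max(boxes[i, 0], qboxes[j, 0])
            overlap_y = min(boxes[i, 3], qboxes[j, 3]) > max(boxes[i, 1], qboxes[j, 1])
            ret[i, j] = overlap_x and overlap_y
    return ret

boxes = np.array([[0., 0., 2., 2.], [5., 5., 6., 6.]])
qboxes = np.array([[1., 1., 3., 3.]])
print(aabb_collision_matrix(boxes, qboxes))   # [[ True], [False]]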
@@ -110,10 +125,19 @@ def box_collision_test(boxes, qboxes, clockwise=True):
@numba.njit
def noise_per_box(boxes, valid_mask, loc_noises, rot_noises):
-    # boxes: [N, 5]
-    # valid_mask: [N]
-    # loc_noises: [N, M, 3]
-    # rot_noises: [N, M]
+    """Add noise to every box (only on the horizontal plane).
+
+    Args:
+        boxes (np.ndarray): Input boxes with shape (N, 5).
+        valid_mask (np.ndarray): Mask to indicate which boxes are valid
+            with shape (N).
+        loc_noises (np.ndarray): Location noises with shape (N, M, 3).
+        rot_noises (np.ndarray): Rotation noises with shape (N, M).
+
+    Returns:
+        np.ndarray: Mask to indicate whether the noise is
+            added successfully (pass the collision test).
+    """
    num_boxes = boxes.shape[0]
    num_tests = loc_noises.shape[1]
    box_corners = box_np_ops.box2d_to_corner_jit(boxes)
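The overall strategy, try up to M noise candidates per box and keep the first one that survives a collision test, can be sketched without numba as follows; `passes_collision_test` is a placeholder and the exact bookkeeping in the repository differs:

import numpy as np

def passes_collision_test(candidate_center):
    # Placeholder: accept any candidate; the real code rebuilds the box
    # corners with the candidate noise and runs box_collision_test.
    return True

def noise_per_box_demo(boxes, valid_mask, loc_noises, rot_noises):
    num_boxes, num_tests = boxes.shape[0], loc_noises.shape[1]
    chosen = -np.ones(num_boxes, dtype=np.int64)   # -1 means no noise kept
    for i in range(num_boxes):
        if not valid_mask[i]:
            continue
        for j in range(num_tests):
            candidate_center = boxes[i, :2] + loc_noises[i, j, :2]
            if passes_collision_test(candidate_center):
                chosen[i] = j        # remember which candidate was accepted
                break
    return chosen

boxes = np.random.rand(3, 5)
print(noise_per_box_demo(boxes, np.array([True, False, True]),
                         np.random.randn(3, 6, 3), np.random.randn(3, 6)))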
@@ -143,10 +167,20 @@ def noise_per_box(boxes, valid_mask, loc_noises, rot_noises):
@numba.njit
def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises,
                      global_rot_noises):
-    # boxes: [N, 5]
-    # valid_mask: [N]
-    # loc_noises: [N, M, 3]
-    # rot_noises: [N, M]
+    """Add noise to every box (only on the horizontal plane). Version 2 is
+    used when global rotations are enabled.
+
+    Args:
+        boxes (np.ndarray): Input boxes with shape (N, 5).
+        valid_mask (np.ndarray): Mask to indicate which boxes are valid
+            with shape (N).
+        loc_noises (np.ndarray): Location noises with shape (N, M, 3).
+        rot_noises (np.ndarray): Rotation noises with shape (N, M).
+
+    Returns:
+        np.ndarray: Mask to indicate whether the noise is
+            added successfully (pass the collision test).
+    """
    num_boxes = boxes.shape[0]
    num_tests = loc_noises.shape[1]
    box_corners = box_np_ops.box2d_to_corner_jit(boxes)
@@ -198,6 +232,15 @@ def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises,
def _select_transform(transform, indices):
+    """Select transform.
+
+    Args:
+        transform (np.ndarray): Transforms to select from.
+        indices (np.ndarray): Mask to indicate which transform to select.
+
+    Returns:
+        np.ndarray: Selected transforms.
+    """
    result = np.zeros((transform.shape[0], *transform.shape[2:]),
                      dtype=transform.dtype)
    for i in range(transform.shape[0]):
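In plain NumPy the selection amounts to picking one candidate transform per box; a small sketch with made-up shapes (the negative-index "nothing selected" convention below is an assumption):

import numpy as np

def select_transform_demo(transform, indices):
    # transform: (N, M, ...) candidate transforms; indices: (N,) chosen
    # candidate per box, with a negative value meaning "keep no transform".
    result = np.zeros((transform.shape[0], *transform.shape[2:]),
                      dtype=transform.dtype)
    for i in range(transform.shape[0]):
        if indices[i] >= 0:
            result[i] = transform[i, indices[i]]
    return result

loc_noises = np.random.randn(4, 6, 3)   # 4 boxes, 6 candidates each
chosen = np.array([2, -1, 0, 5])        # box 1 keeps no noise
print(select_transform_demo(loc_noises, chosen).shape)   # (4, 3)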
@@ -208,6 +251,13 @@ def _select_transform(transform, indices):
@numba.njit
def _rotation_matrix_3d_(rot_mat_T, angle, axis):
+    """Get the 3D rotation matrix.
+
+    Args:
+        rot_mat_T (np.ndarray): Transposed rotation matrix.
+        angle (float): Rotation angle.
+        axis (int): Rotation axis.
+    """
    rot_sin = np.sin(angle)
    rot_cos = np.cos(angle)
    rot_mat_T[:] = np.eye(3)
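For reference, a rotation matrix about a single coordinate axis can be built as follows; axis numbering and sign conventions vary between libraries, so treat this as an illustrative sketch rather than the repository's exact convention:

import numpy as np

def rotation_matrix_3d(angle, axis=2):
    # Build a 3x3 rotation matrix for a rotation of `angle` radians
    # about the given axis (0=x, 1=y, 2=z).
    c, s = np.cos(angle), np.sin(angle)
    mat = np.eye(3)
    i, j = [(1, 2), (0, 2), (0, 1)][axis]   # the two axes being mixed
    mat[i, i], mat[j, j] = c, c
    mat[i, j], mat[j, i] = -s, s
    return mat

print(rotation_matrix_3d(np.pi / 2, axis=2).round(3))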
@@ -231,6 +281,17 @@ def _rotation_matrix_3d_(rot_mat_T, angle, axis):
@numba.njit
def points_transform_(points, centers, point_masks, loc_transform,
                      rot_transform, valid_mask):
+    """Apply transforms to points and box centers.
+
+    Args:
+        points (np.ndarray): Input points.
+        centers (np.ndarray): Input box centers.
+        point_masks (np.ndarray): Mask to indicate which points need
+            to be transformed.
+        loc_transform (np.ndarray): Location transform to be applied.
+        rot_transform (np.ndarray): Rotation transform to be applied.
+        valid_mask (np.ndarray): Mask to indicate which boxes are valid.
+    """
    num_box = centers.shape[0]
    num_points = points.shape[0]
    rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
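Conceptually, each valid box rotates its member points about the box center and then shifts them; a vectorized NumPy sketch of that idea, simplified to a yaw-only rotation and using illustrative names:

import numpy as np

def transform_points_per_box(points, centers, point_masks, loc_transform,
                             rot_transform, valid_mask):
    # points: (P, 3), centers: (N, 3), point_masks: (P, N) membership mask,
    # loc_transform: (N, 3), rot_transform: (N,) yaw angles, valid_mask: (N,).
    points = points.copy()
    for i in range(centers.shape[0]):
        if not valid_mask[i]:
            continue
        c, s = np.cos(rot_transform[i]), np.sin(rot_transform[i])
        rot = np.array([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])
        idx = point_masks[:, i].astype(bool)
        # Rotate the box's points about its center, then translate.
        points[idx] = (points[idx] - centers[i]) @ rot.T + centers[i]
        points[idx] += loc_transform[i]
    return points

pts = np.random.rand(50, 3)
ctrs = np.random.rand(2, 3)
masks = np.random.rand(50, 2) > 0.5
out = transform_points_per_box(pts, ctrs, masks, np.zeros((2, 3)),
                               np.full(2, np.pi / 4), np.array([True, True]))
print(out.shape)   # (50, 3)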
@@ -249,6 +310,14 @@ def points_transform_(points, centers, point_masks, loc_transform,
@numba.njit
def box3d_transform_(boxes, loc_transform, rot_transform, valid_mask):
+    """Transform 3D boxes.
+
+    Args:
+        boxes (np.ndarray): 3D boxes to be transformed.
+        loc_transform (np.ndarray): Location transform to be applied.
+        rot_transform (np.ndarray): Rotation transform to be applied.
+        valid_mask (np.ndarray | None): Mask to indicate which boxes are valid.
+    """
    num_box = boxes.shape[0]
    for i in range(num_box):
        if valid_mask[i]:
@@ -263,12 +332,21 @@ def noise_per_object_v3_(gt_boxes,
                         center_noise_std=1.0,
                         global_random_rot_range=np.pi / 4,
                         num_try=100):
-    """random rotate or remove each groundtrutn independently. use kitti viewer
-    to test this function points_transform_
+    """Random rotate or remove each groundtruth independently. Use the KITTI
+    viewer to test this function.
    Args:
-        gt_boxes: [N, 7], gt box in lidar.points_transform_
-        points: [M, 4], point cloud in lidar.
+        gt_boxes (np.ndarray): Ground truth boxes with shape (N, 7).
+        points (np.ndarray | None): Input point cloud with shape (M, 4).
+            Default: None.
+        valid_mask (np.ndarray | None): Mask to indicate which boxes are valid.
+            Default: None.
+        rotation_perturb (float): Rotation perturbation. Default: pi / 4.
+        center_noise_std (float): Center noise standard deviation.
+            Default: 1.0.
+        global_random_rot_range (float): Global random rotation range.
+            Default: pi/4.
+        num_try (int): Number of attempts. Default: 100.
    """
    num_boxes = gt_boxes.shape[0]
    if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
...
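For context on where loc_noises and rot_noises come from, candidates are typically drawn per object before the trial loop runs; a sketch of such candidate generation under the documented defaults (illustrative, not copied from the repository):

import numpy as np

num_boxes, num_try = 8, 100
center_noise_std = np.array([1.0, 1.0, 1.0])
rotation_perturb = np.array([-np.pi / 4, np.pi / 4])

# One translation candidate (x, y, z) and one yaw candidate per box and try.
loc_noises = np.random.normal(
    scale=center_noise_std, size=(num_boxes, num_try, 3))
rot_noises = np.random.uniform(
    rotation_perturb[0], rotation_perturb[1], size=(num_boxes, num_try))
print(loc_noises.shape, rot_noises.shape)   # (8, 100, 3) (8, 100)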
@@ -9,6 +9,15 @@ from ..registry import OBJECTSAMPLERS
class BatchSampler:
+    """Class for sampling a specific category of ground truths.
+
+    Args:
+        sampled_list (list[dict]): List of samples.
+        name (str | None): The category of samples. Default: None.
+        epoch (int | None): Sampling epoch. Default: None.
+        shuffle (bool): Whether to shuffle indices. Default: False.
+        drop_reminder (bool): Whether to drop the remainder. Default: False.
+    """
    def __init__(self,
                 sampled_list,
@@ -29,6 +38,14 @@ class BatchSampler:
        self._drop_reminder = drop_reminder
    def _sample(self, num):
+        """Sample a specific number of ground truths and return indices.
+
+        Args:
+            num (int): Number of samples to draw.
+
+        Returns:
+            list[int]: Indices of sampled ground truths.
+        """
        if self._idx + num >= self._example_num:
            ret = self._indices[self._idx:].copy()
            self._reset()
@@ -38,6 +55,7 @@ class BatchSampler:
        return ret
    def _reset(self):
+        """Reset the index of batchsampler to zero."""
        assert self._name is not None
        # print("reset", self._name)
        if self._shuffle:
@@ -45,6 +63,14 @@ class BatchSampler:
        self._idx = 0
    def sample(self, num):
+        """Sample a specific number of ground truths.
+
+        Args:
+            num (int): Number of samples to draw.
+
+        Returns:
+            list[dict]: Sampled ground truths.
+        """
        indices = self._sample(num)
        return [self._sampled_list[i] for i in indices]
...
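The sampling logic boils down to walking a (possibly shuffled) index list and wrapping around once it is exhausted; a condensed standalone sketch, not the repository class:

import numpy as np

class RoundRobinSampler:
    def __init__(self, sampled_list, shuffle=True):
        self._sampled_list = sampled_list
        self._indices = np.arange(len(sampled_list))
        self._shuffle = shuffle
        self._idx = 0
        if shuffle:
            np.random.shuffle(self._indices)

    def _sample(self, num):
        # Take `num` indices; if the pointer would run off the end,
        # return the tail, reshuffle, and start over on the next call.
        if self._idx + num >= len(self._indices):
            ret = self._indices[self._idx:].copy()
            if self._shuffle:
                np.random.shuffle(self._indices)
            self._idx = 0
        else:
            ret = self._indices[self._idx:self._idx + num]
            self._idx += num
        return ret

    def sample(self, num):
        return [self._sampled_list[i] for i in self._sample(num)]

sampler = RoundRobinSampler([{'name': f'gt_{i}'} for i in range(5)])
print(sampler.sample(3), sampler.sample(3))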
@@ -70,6 +70,53 @@ class DefaultFormatBundle(object):
@PIPELINES.register_module()
class Collect3D(object):
+    """Collect data from the loader relevant to the specific task.
+
+    This is usually the last stage of the data loader pipeline. Typically keys
+    is set to some subset of "img", "proposals", "gt_bboxes",
+    "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".
+
+    The "img_meta" item is always populated. The contents of the "img_meta"
+    dictionary depend on "meta_keys". By default this includes:
+
+        - "img_shape": shape of the image input to the network as a tuple
+          (h, w, c). Note that images may be zero padded on the
+          bottom/right if the batch tensor is larger than this shape.
+        - "scale_factor": a float indicating the preprocessing scale
+        - "flip": a boolean indicating if image flip transform was used
+        - "filename": path to the image file
+        - "ori_shape": original shape of the image as a tuple (h, w, c)
+        - "pad_shape": image shape after padding
+        - "lidar2img": transform from lidar to image
+        - 'pcd_horizontal_flip': a boolean indicating if point cloud is
+          flipped horizontally
+        - 'pcd_vertical_flip': a boolean indicating if point cloud is
+          flipped vertically
+        - 'box_mode_3d': 3D box mode
+        - 'box_type_3d': 3D box type
+        - 'img_norm_cfg': a dict of normalization information:
+            - mean: per channel mean subtraction
+            - std: per channel std divisor
+            - to_rgb: bool indicating if bgr was converted to rgb
+        - 'rect': rectification matrix
+        - 'Trv2c': transformation from velodyne to camera coordinate
+        - 'P2': transformation between cameras
+        - 'pcd_trans': point cloud transformations
+        - 'sample_idx': sample index
+        - 'pcd_scale_factor': point cloud scale factor
+        - 'pcd_rotation': rotation applied to point cloud
+        - 'pts_filename': path to point cloud file.
+
+    Args:
+        keys (Sequence[str]): Keys of results to be collected in ``data``.
+        meta_keys (Sequence[str], optional): Meta keys to be converted to
+            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
+            Default: ``('filename', 'ori_shape', 'img_shape', 'lidar2img',
+            'pad_shape', 'scale_factor', 'flip', 'pcd_horizontal_flip',
+            'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d', 'img_norm_cfg',
+            'rect', 'Trv2c', 'P2', 'pcd_trans', 'sample_idx',
+            'pcd_scale_factor', 'pcd_rotation', 'pts_filename')``
+    """
    def __init__(self,
                 keys,
...
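As a usage illustration, a collector like this normally sits at the end of a pipeline config; a minimal sketch where the chosen keys are common examples rather than values taken from this merge request:

train_pipeline = [
    # ... loading, augmentation and formatting transforms would come first ...
    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']),
]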
@@ -61,7 +61,6 @@ class PointwiseSemanticHead(nn.Module):
                - seg_preds (torch.Tensor): segment predictions
                - part_preds (torch.Tensor): part predictions
                - part_feats (torch.Tensor): feature predictions
        """
        seg_preds = self.seg_cls_layer(x)  # (N, 1)
        part_preds = self.seg_reg_layer(x)  # (N, 3)
...
-from setuptools import find_packages, setup
import os
import subprocess
import time
+from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
                                        CUDAExtension)
@@ -119,26 +119,26 @@ def make_cuda_ext(name,
def parse_requirements(fname='requirements.txt', with_version=True):
-    """
-    Parse the package dependencies listed in a requirements file but strips
+    """Parse the package dependencies listed in a requirements file but strip
    specific versioning information.
    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs
    Returns:
-        List[str]: list of requirements items
+        list[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
-    import re
    import sys
    from os.path import exists
+    import re
    require_fpath = fname

    def parse_line(line):
-        """
-        Parse information from a line in a requirements text file
-        """
+        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
...
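A condensed standalone sketch of the same idea, stripping version specifiers from requirement lines; it deliberately ignores the '-r'/'-e' cases the real helper handles:

import re

def parse_requirements_demo(lines, with_version=False):
    # Split each requirement into package name and version specifier,
    # keeping the specifier only when `with_version` is True.
    items = []
    for line in lines:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        match = re.match(r'([A-Za-z0-9_.\-\[\]]+)\s*(.*)', line)
        name, version = match.group(1), match.group(2)
        items.append(name + version if with_version else name)
    return items

print(parse_requirements_demo(['numpy>=1.18', 'numba==0.48.0', '# comment']))
# ['numpy', 'numba']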
""" """Tests the Assigner objects.
Tests the Assigner objects.
CommandLine: CommandLine:
pytest tests/test_assigner.py pytest tests/test_assigner.py
xdoctest tests/test_assigner.py zero xdoctest tests/test_assigner.py zero
""" """
import torch import torch
...@@ -65,9 +61,7 @@ def test_max_iou_assigner_with_ignore(): ...@@ -65,9 +61,7 @@ def test_max_iou_assigner_with_ignore():
def test_max_iou_assigner_with_empty_gt(): def test_max_iou_assigner_with_empty_gt():
""" """Test corner case where an image might have no true detections."""
Test corner case where an image might have no true detections
"""
self = MaxIoUAssigner( self = MaxIoUAssigner(
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.5, neg_iou_thr=0.5,
...@@ -86,9 +80,7 @@ def test_max_iou_assigner_with_empty_gt(): ...@@ -86,9 +80,7 @@ def test_max_iou_assigner_with_empty_gt():
def test_max_iou_assigner_with_empty_boxes(): def test_max_iou_assigner_with_empty_boxes():
""" """Test corner case where an network might predict no boxes."""
Test corner case where an network might predict no boxes
"""
self = MaxIoUAssigner( self = MaxIoUAssigner(
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.5, neg_iou_thr=0.5,
...@@ -112,10 +104,8 @@ def test_max_iou_assigner_with_empty_boxes(): ...@@ -112,10 +104,8 @@ def test_max_iou_assigner_with_empty_boxes():
def test_max_iou_assigner_with_empty_boxes_and_ignore(): def test_max_iou_assigner_with_empty_boxes_and_ignore():
""" """Test corner case where an network might predict no boxes and
Test corner case where an network might predict no boxes and ignore_iof_thr ignore_iof_thr is on."""
is on
"""
self = MaxIoUAssigner( self = MaxIoUAssigner(
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.5, neg_iou_thr=0.5,
...@@ -148,9 +138,7 @@ def test_max_iou_assigner_with_empty_boxes_and_ignore(): ...@@ -148,9 +138,7 @@ def test_max_iou_assigner_with_empty_boxes_and_ignore():
def test_max_iou_assigner_with_empty_boxes_and_gt(): def test_max_iou_assigner_with_empty_boxes_and_gt():
""" """Test corner case where an network might predict no boxes and no gt."""
Test corner case where an network might predict no boxes and no gt
"""
self = MaxIoUAssigner( self = MaxIoUAssigner(
pos_iou_thr=0.5, pos_iou_thr=0.5,
neg_iou_thr=0.5, neg_iou_thr=0.5,
......