Commit 088919f6 authored by pangjm's avatar pangjm

Update single base version

parent 108fc9e1
from .dist_utils import *
from .hooks import *
from .misc import *

import os

import torch
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.utils import clip_grad

from mmcv.torchpack import Hook, OptimizerStepperHook

__all__ = [
    'init_dist', 'average_gradients', 'broadcast_params',
    'DistOptimizerStepperHook', 'DistSamplerSeedHook'
]


def init_dist(world_size,
              rank,
              backend='gloo',
              master_ip='127.0.0.1',
              port=29500):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    os.environ['MASTER_ADDR'] = master_ip
    os.environ['MASTER_PORT'] = str(port)
    if backend == 'nccl':
        dist.init_process_group(backend='nccl')
    else:
        dist.init_process_group(
            backend='gloo', rank=rank, world_size=world_size)


def average_gradients(model):
    for param in model.parameters():
        if param.requires_grad and param.grad is not None:
            dist.all_reduce(param.grad.data)


def broadcast_params(model):
    for p in model.state_dict().values():
        dist.broadcast(p, 0)


class DistOptimizerStepperHook(OptimizerStepperHook):

    def after_train_iter(self, runner):
        runner.optimizer.zero_grad()
        runner.outputs['loss'].backward()
        average_gradients(runner.model)
        if self.grad_clip:
            clip_grad.clip_grad_norm_(
                filter(lambda p: p.requires_grad, runner.model.parameters()),
                max_norm=self.max_norm,
                norm_type=self.norm_type)
        runner.optimizer.step()


class DistSamplerSeedHook(Hook):

    def before_epoch(self, runner):
        runner.data_loader.sampler.set_epoch(runner.epoch)
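

# A minimal usage sketch for the helpers above (hypothetical; `build_model`,
# `runner` and the launcher are placeholders, not part of this commit). Each
# training process calls init_dist() once, syncs weights from rank 0, then
# averages gradients between backward() and optimizer.step() -- which is
# exactly what DistOptimizerStepperHook automates on a mmcv.torchpack runner.
def _train_worker(rank, world_size):
    init_dist(world_size, rank, backend='gloo')
    # model = build_model(...)      # placeholder for model construction
    # broadcast_params(model)       # start all ranks from rank 0's weights
    # runner.register_hook(DistOptimizerStepperHook(...))  # args assumed
    # runner.register_hook(DistSamplerSeedHook())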

@@ -7,10 +7,16 @@ import mmcv
 import numpy as np
 import torch
 from mmcv.torchpack import Hook
-from mmdet import collate, scatter
+from mmdet.datasets import collate
+from mmdet.nn.parallel import scatter
 from pycocotools.cocoeval import COCOeval
-from .eval import eval_recalls
+from ..eval import eval_recalls
+
+__all__ = [
+    'EmptyCacheHook', 'DistEvalHook', 'DistEvalRecallHook',
+    'CocoDistEvalmAPHook'
+]


 class EmptyCacheHook(Hook):
@@ -237,10 +243,3 @@ class CocoDistEvalmAPHook(DistEvalHook, CocoEvalMixin):
         runner.log_buffer.output[field] = cocoEval.stats[0]
         runner.log_buffer.ready = True
         os.remove(tmp_file)
-
-
-class CocoDistCascadeEvalmAPHook(CocoDistEvalmAPHook):
-
-    def evaluate(self, runner, results):
-        results = [res[-1] for res in results]
-        super(CocoDistCascadeEvalmAPHook, self).evaluate(runner, results)

import subprocess

import mmcv
import numpy as np
import torch

__all__ = ['tensor2imgs', 'unique', 'unmap', 'results2json']


def tensor2imgs(tensor,
                color_order='RGB',
                color_mean=(0.485, 0.456, 0.406),
                color_std=(0.229, 0.224, 0.225)):
    assert color_order in ['RGB', 'BGR']
    img_per_gpu = tensor.size(0)
    color_mean = np.array(color_mean, dtype=np.float32)
    color_std = np.array(color_std, dtype=np.float32)
    imgs = []
    for img_id in range(img_per_gpu):
        img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
        # denormalize in the tensor's own channel order first, then convert
        # RGB -> BGR; flipping channels before denormalizing would apply the
        # mean/std statistics to the wrong channels
        img = img * color_std + color_mean
        if color_order == 'RGB':
            img = mmcv.rgb2bgr(img)
        imgs.append(np.ascontiguousarray(img))
    return imgs
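
# Note: given a float tensor of shape (N, 3, H, W) normalized with the
# statistics above, tensor2imgs() returns a list of N contiguous HxWx3
# float32 BGR arrays, ready for mmcv/cv2-style visualization.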

def unique(tensor):
    if tensor.is_cuda:
        u_tensor = np.unique(tensor.cpu().numpy())
        return tensor.new_tensor(u_tensor)
    else:
        return torch.unique(tensor)


def unmap(data, count, inds, fill=0):
    """Unmap a subset of items (data) back to the original set of items (of
    size count)."""
    if data.dim() == 1:
        ret = data.new_full((count, ), fill)
        ret[inds] = data
    else:
        new_size = (count, ) + data.size()[1:]
        ret = data.new_full(new_size, fill)
        ret[inds, :] = data
    return ret
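
# Illustrative example of unmap() semantics (values are hypothetical):
#   unmap(torch.tensor([7, 8]), count=5, inds=torch.tensor([1, 3]))
#   -> tensor([0, 7, 0, 8, 0])
# i.e. values computed on a subset are scattered back to their original
# positions, with `fill` everywhere else.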

def xyxy2xywh(bbox):
    _bbox = bbox.tolist()
    return [
        _bbox[0],
        _bbox[1],
        _bbox[2] - _bbox[0] + 1,
        _bbox[3] - _bbox[1] + 1,
    ]
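
# Example: with the inclusive-pixel convention used here (hence the +1),
# a corner-format box (x1, y1, x2, y2) = (10, 20, 30, 60) becomes the
# COCO-style (x, y, w, h) = (10, 20, 21, 41).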

def det2json(dataset, results):
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        result = results[idx]
        for label in range(len(result)):
            bboxes = result[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = dataset.cat_ids[label]
                json_results.append(data)
    return json_results


def segm2json(dataset, results):
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        det, seg = results[idx]
        for label in range(len(det)):
            bboxes = det[label]
            segms = seg[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = dataset.cat_ids[label]
                segms[i]['counts'] = segms[i]['counts'].decode()
                data['segmentation'] = segms[i]
                json_results.append(data)
    return json_results


def proposal2json(dataset, results):
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        bboxes = results[idx]
        for i in range(bboxes.shape[0]):
            data = dict()
            data['image_id'] = img_id
            data['bbox'] = xyxy2xywh(bboxes[i])
            data['score'] = float(bboxes[i][4])
            data['category_id'] = 1
            json_results.append(data)
    return json_results


def results2json(dataset, results, out_file):
    if isinstance(results[0], list):
        json_results = det2json(dataset, results)
    elif isinstance(results[0], tuple):
        json_results = segm2json(dataset, results)
    elif isinstance(results[0], np.ndarray):
        json_results = proposal2json(dataset, results)
    else:
        raise TypeError('invalid type of results')
    mmcv.dump(json_results, out_file)
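
# Hypothetical usage: `results` must be index-aligned with the dataset (one
# entry per image); the dispatch above infers the task from the per-image
# result type (list -> detection, tuple -> detection + masks, ndarray ->
# proposals).
#   results2json(dataset, results, 'results.json')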

@@ -71,7 +71,6 @@ def parse_ann_info(ann_info, cat2label, with_mask=True):

 class CocoDataset(Dataset):

     def __init__(self,
                  ann_file,
                  img_prefix,
@@ -253,31 +252,38 @@ class CocoDataset(Dataset):

     def prepare_test_img(self, idx):
         """Prepare an image for testing (multi-scale and flipping)"""
-        img_info = self._load_info(idx, with_ann=False)
-        img_file = osp.join(self.prefix, img_info['file_name'])
+        img_info = self.img_infos[idx]
+        img = mmcv.imread(osp.join(self.img_prefix, img_info['file_name']))
         proposal = (self.proposals[idx][:, :4]
                     if self.proposals is not None else None)

-        def prepare_single(img_file, scale, flip, proposal=None):
-            img_np, shape_scale_np = self.img_transform(img_file, scale, flip)
-            img, shape_scale = self.numpy2tensor(img_np, shape_scale_np)
-            img_meta = dict(shape_scale=shape_scale, flip=flip)
+        def prepare_single(img, scale, flip, proposal=None):
+            _img, _img_shape, _scale_factor = self.img_transform(
+                img, scale, flip)
+            img, img_shape, scale_factor = self.numpy2tensor(
+                _img, _img_shape, _scale_factor)
+            ori_shape = (img_info['height'], img_info['width'])
+            img_meta = dict(
+                ori_shape=ori_shape,
+                img_shape=img_shape,
+                scale_factor=scale_factor,
+                flip=flip)
             if proposal is not None:
-                proposal = self.bbox_transform(proposal, shape_scale_np, flip)
+                proposal = self.bbox_transform(proposal, _scale_factor, flip)
                 proposal = self.numpy2tensor(proposal)
             return img, img_meta, proposal

         imgs = []
         img_metas = []
         proposals = []
-        for scale in self.img_scale:
-            img, img_meta, proposal = prepare_single(img_file, scale, False,
-                                                     proposal)
+        for scale in self.img_scales:
+            img, img_meta, proposal = prepare_single(img, scale, False,
+                                                     proposal)
             imgs.append(img)
             img_metas.append(img_meta)
             proposals.append(proposal)
             if self.flip_ratio > 0:
-                img, img_meta, prop = prepare_single(img_file, scale, True,
-                                                     proposal)
+                img, img_meta, prop = prepare_single(img, scale, True,
+                                                     proposal)
                 imgs.append(img)
                 img_metas.append(img_meta)
...

from functools import partial

import torch

from .coco import CocoDataset
from .collate import collate
from .sampler import GroupSampler, DistributedGroupSampler


def build_data(cfg, args):
    dataset = CocoDataset(**cfg)
    if args.dist:
        sampler = DistributedGroupSampler(dataset, args.img_per_gpu,
                                          args.world_size, args.rank)
        batch_size = args.img_per_gpu
        num_workers = args.data_workers
    else:
        sampler = GroupSampler(dataset, args.img_per_gpu)
        batch_size = args.world_size * args.img_per_gpu
        num_workers = args.world_size * args.data_workers
    loader = torch.utils.data.DataLoader(
        dataset,
        # use the branch-dependent batch size computed above (the original
        # passed args.img_per_gpu, leaving batch_size dead in the else branch)
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=args.img_per_gpu),
        pin_memory=False)
    return loader
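
# Illustrative call (field names follow the attribute accesses above; the
# `args` namespace and cfg values are assumptions):
#   loader = build_data(
#       dict(ann_file='annotations.json', img_prefix='images/'), args)
#   # uses args.dist, args.img_per_gpu, args.world_size, args.rank,
#   # args.data_workers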

 import mmcv
-# import cvbase as cvb
 import numpy as np
 import torch

-from mmdet.core import segms
+from mmdet.core.mask_ops import segms

 __all__ = [
     'ImageTransform', 'BboxTransform', 'PolyMaskTransform', 'Numpy2Tensor'
@@ -64,7 +63,7 @@ class ImageTransform(object):

 class ImageCrop(object):
     """crop image patches and resize patches into fixed size
     1. (read and) flip image (if needed)
     2. crop image patches according to given bboxes
     3. resize patches into fixed size (default 224x224)
     4. normalize the image (if needed)
@@ -126,6 +125,8 @@ class BboxTransform(object):
         gt_bboxes = bboxes * scale_factor
         if flip:
             gt_bboxes = mmcv.bbox_flip(gt_bboxes, img_shape)
+        gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1])
+        gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0])
         if self.max_num_gts is None:
             return gt_bboxes
         else:
@@ -205,4 +206,4 @@ class Numpy2Tensor(object):
         if len(args) == 1:
             return torch.from_numpy(args[0])
         else:
-            return tuple([torch.from_numpy(array) for array in args])
+            return tuple([torch.from_numpy(np.array(array)) for array in args])
from .detectors import Detector
 from .bbox_head import BBoxHead
+from .convfc_bbox_head import ConvFCRoIHead, SharedFCRoIHead

-__all__ = ['BBoxHead']
+__all__ = ['BBoxHead', 'ConvFCRoIHead', 'SharedFCRoIHead']

 import torch.nn as nn
 import torch.nn.functional as F

-from mmdet.core import (bbox_transform_inv, bbox_target, multiclass_nms,
+from mmdet.core import (bbox_transform_inv, multiclass_nms, bbox_target,
                         weighted_cross_entropy, weighted_smoothl1, accuracy)
@@ -10,7 +10,6 @@ class BBoxHead(nn.Module):
     regression respectively"""

     def __init__(self,
-                 exclude_mal_box=True,
                  with_avg_pool=False,
                  with_cls=True,
                  with_reg=True,
@@ -31,7 +30,6 @@ class BBoxHead(nn.Module):
         self.target_means = target_means
         self.target_stds = target_stds
         self.reg_class_agnostic = reg_class_agnostic
-        self.exclude_mal_box = exclude_mal_box

         in_channels = self.in_channels
         if self.with_avg_pool:
@@ -61,7 +59,7 @@ class BBoxHead(nn.Module):
         bbox_pred = self.fc_reg(x) if self.with_reg else None
         return cls_score, bbox_pred

-    def bbox_target(self, pos_proposals, neg_proposals, pos_gt_bboxes,
-                    pos_gt_labels, rcnn_train_cfg):
+    def get_bbox_target(self, pos_proposals, neg_proposals, pos_gt_bboxes,
+                        pos_gt_labels, rcnn_train_cfg):
         reg_num_classes = 1 if self.reg_class_agnostic else self.num_classes
         cls_reg_targets = bbox_target(
@@ -69,11 +67,10 @@ class BBoxHead(nn.Module):
             neg_proposals,
             pos_gt_bboxes,
             pos_gt_labels,
-            self.target_means,
-            self.target_stds,
             rcnn_train_cfg,
             reg_num_classes,
-            debug_imgs=self.debug_imgs)
+            target_means=self.target_means,
+            target_stds=self.target_stds)
         return cls_reg_targets

     def loss(self, cls_score, bbox_pred, labels, label_weights, bbox_targets,
@@ -96,6 +93,7 @@ class BBoxHead(nn.Module):
                        cls_score,
                        bbox_pred,
                        img_shape,
+                       scale_factor,
                        rescale=False,
                        nms_cfg=None):
         if isinstance(cls_score, list):
@@ -111,7 +109,7 @@ class BBoxHead(nn.Module):
         # TODO: add clip here
         if rescale:
-            bboxes /= img_shape[-1]
+            bboxes /= scale_factor.float()
         if nms_cfg is None:
             return bboxes, scores
...

import torch.nn as nn

from .bbox_head import BBoxHead
from ..utils import ConvModule


class ConvFCRoIHead(BBoxHead):
    """More general bbox head, with shared conv and fc layers and two optional
    separated branches.

                                /-> cls convs -> cls fcs -> cls
    shared convs -> shared fcs
                                \-> reg convs -> reg fcs -> reg
    """

    def __init__(self,
                 num_shared_convs=0,
                 num_shared_fcs=0,
                 num_cls_convs=0,
                 num_cls_fcs=0,
                 num_reg_convs=0,
                 num_reg_fcs=0,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 *args,
                 **kwargs):
        super(ConvFCRoIHead, self).__init__(*args, **kwargs)
        assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs
                + num_reg_convs + num_reg_fcs > 0)
        if num_cls_convs > 0 or num_reg_convs > 0:
            assert num_shared_fcs == 0
        if not self.with_cls:
            assert num_cls_convs == 0 and num_cls_fcs == 0
        if not self.with_reg:
            assert num_reg_convs == 0 and num_reg_fcs == 0
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels

        # add shared convs and fcs
        self.shared_convs, self.shared_fcs, last_layer_dim = self._add_conv_fc_branch(
            self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)
        self.shared_out_channels = last_layer_dim

        # add cls specific branch
        self.cls_convs, self.cls_fcs, self.cls_last_dim = self._add_conv_fc_branch(
            self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)

        # add reg specific branch
        self.reg_convs, self.reg_fcs, self.reg_last_dim = self._add_conv_fc_branch(
            self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)

        if self.num_shared_fcs == 0 and not self.with_avg_pool:
            if self.num_cls_fcs == 0:
                self.cls_last_dim *= (self.roi_feat_size * self.roi_feat_size)
            if self.num_reg_fcs == 0:
                self.reg_last_dim *= (self.roi_feat_size * self.roi_feat_size)

        self.relu = nn.ReLU(inplace=True)
        # reconstruct fc_cls and fc_reg since input channels are changed
        if self.with_cls:
            self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes)
        if self.with_reg:
            out_dim_reg = (4 if self.reg_class_agnostic else
                           4 * self.num_classes)
            self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)

    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch

        convs -> avg pool (optional) -> fcs
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                conv_in_channels = (last_layer_dim
                                    if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        normalize=self.normalize,
                        bias=self.with_bias))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if (is_shared
                    or self.num_shared_fcs == 0) and not self.with_avg_pool:
                last_layer_dim *= (self.roi_feat_size * self.roi_feat_size)
            for i in range(num_branch_fcs):
                fc_in_channels = (last_layer_dim
                                  if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim

    def init_weights(self):
        super(ConvFCRoIHead, self).init_weights()
        for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
            for m in module_list.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # shared part
        if self.num_shared_convs > 0:
            for conv in self.shared_convs:
                x = conv(x)
        if self.num_shared_fcs > 0:
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.view(x.size(0), -1)
            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # separate branches
        x_cls = x
        x_reg = x

        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if x_cls.dim() > 2:
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.view(x_cls.size(0), -1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))

        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if x_reg.dim() > 2:
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.view(x_reg.size(0), -1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))

        cls_score = self.fc_cls(x_cls) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        return cls_score, bbox_pred


class SharedFCRoIHead(ConvFCRoIHead):

    def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):
        assert num_fcs >= 1
        super(SharedFCRoIHead, self).__init__(
            num_shared_convs=0,
            num_shared_fcs=num_fcs,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
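
# A minimal instantiation sketch (hypothetical values; keyword arguments such
# as in_channels, roi_feat_size and num_classes belong to the BBoxHead base
# class and are assumptions here):
#   head = SharedFCRoIHead(num_fcs=2, fc_out_channels=1024, in_channels=256,
#                          roi_feat_size=7, num_classes=81)
# This is the classic "two shared FC" head: equivalent to a ConvFCRoIHead
# with num_shared_fcs=2 and no branch-specific convs or fcs.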

 import mmcv
+from mmcv import torchpack
 from torch import nn

 from . import (backbones, necks, roi_extractors, rpn_heads, bbox_heads,
@@ -11,7 +12,7 @@ __all__ = [

 def _build_module(cfg, parrent=None):
-    return cfg if isinstance(cfg, nn.Module) else mmcv.obj_from_dict(
+    return cfg if isinstance(cfg, nn.Module) else torchpack.obj_from_dict(
         cfg, parrent)
...

@@ -2,137 +2,141 @@ import torch
 import torch.nn as nn

 from .. import builder
-from mmdet.core.utils import tensor2imgs
 from mmdet.core import (bbox2roi, bbox_mapping, split_combined_gt_polys,
-                        bbox_sampling, multiclass_nms, merge_aug_proposals,
-                        merge_aug_bboxes, merge_aug_masks, bbox2result)
+                        bbox2result, multiclass_nms, merge_aug_proposals,
+                        merge_aug_bboxes, merge_aug_masks, sample_proposals)


-class TwoStageDetector(nn.Module):
+class Detector(nn.Module):

     def __init__(self,
                  backbone,
-                 neck,
-                 rpn_head,
-                 roi_block,
-                 bbox_head,
-                 rpn_train_cfg,
-                 rpn_test_cfg,
-                 rcnn_train_cfg,
-                 rcnn_test_cfg,
+                 neck=None,
+                 rpn_head=None,
+                 roi_block=None,
+                 bbox_head=None,
                  mask_block=None,
                  mask_head=None,
+                 rpn_train_cfg=None,
+                 rpn_test_cfg=None,
+                 rcnn_train_cfg=None,
+                 rcnn_test_cfg=None,
                  pretrained=None):
-        super(TwoStageDetector, self).__init__()
+        super(Detector, self).__init__()
         self.backbone = builder.build_backbone(backbone)
-        self.neck = builder.build_neck(neck) if neck is not None else None
-        self.rpn_head = builder.build_rpn_head(rpn_head)
-        self.bbox_roi_extractor = builder.build_roi_block(roi_block)
-        self.bbox_head = builder.build_bbox_head(bbox_head)
-        self.mask_roi_extractor = builder.build_roi_block(mask_block) if (
-            mask_block is not None) else None
-        self.mask_head = builder.build_mask_head(mask_head) if (
-            mask_head is not None) else None
-        self.with_mask = False if self.mask_head is None else True
-        self.rpn_train_cfg = rpn_train_cfg
-        self.rpn_test_cfg = rpn_test_cfg
-        self.rcnn_train_cfg = rcnn_train_cfg
-        self.rcnn_test_cfg = rcnn_test_cfg
+        self.with_neck = True if neck is not None else False
+        if self.with_neck:
+            self.neck = builder.build_neck(neck)
+        self.with_rpn = True if rpn_head is not None else False
+        if self.with_rpn:
+            self.rpn_head = builder.build_rpn_head(rpn_head)
+            self.rpn_train_cfg = rpn_train_cfg
+            self.rpn_test_cfg = rpn_test_cfg
+        self.with_bbox = True if bbox_head is not None else False
+        if self.with_bbox:
+            self.bbox_roi_extractor = builder.build_roi_extractor(roi_block)
+            self.bbox_head = builder.build_bbox_head(bbox_head)
+            self.rcnn_train_cfg = rcnn_train_cfg
+            self.rcnn_test_cfg = rcnn_test_cfg
+        self.with_mask = True if mask_head is not None else False
+        if self.with_mask:
+            self.mask_roi_extractor = builder.build_roi_extractor(mask_block)
+            self.mask_head = builder.build_mask_head(mask_head)

         self.init_weights(pretrained=pretrained)

     def init_weights(self, pretrained=None):
         if pretrained is not None:
             print('load model from: {}'.format(pretrained))
         self.backbone.init_weights(pretrained=pretrained)
-        if self.neck is not None:
+        if self.with_neck:
             if isinstance(self.neck, nn.Sequential):
                 for m in self.neck:
                     m.init_weights()
             else:
                 self.neck.init_weights()
-        self.rpn_head.init_weights()
-        self.bbox_roi_extractor.init_weights()
-        self.bbox_head.init_weights()
-        if self.mask_roi_extractor is not None:
+        if self.with_rpn:
+            self.rpn_head.init_weights()
+        if self.with_bbox:
+            self.bbox_roi_extractor.init_weights()
+            self.bbox_head.init_weights()
+        if self.with_mask:
             self.mask_roi_extractor.init_weights()
-        if self.mask_head is not None:
             self.mask_head.init_weights()

     def forward(self,
                 img,
                 img_meta,
                 gt_bboxes=None,
+                proposals=None,
                 gt_labels=None,
-                gt_ignore=None,
-                gt_polys=None,
+                gt_bboxes_ignore=None,
+                gt_mask_polys=None,
                 gt_poly_lens=None,
                 num_polys_per_mask=None,
                 return_loss=True,
-                return_bboxes=False,
+                return_bboxes=True,
                 rescale=False):
+        assert proposals is not None or self.with_rpn, \
+            "Only one of proposals file and RPN can exist."
+
         if not return_loss:
-            return self.test(img, img_meta, rescale)
-
-        if not self.with_mask:
-            assert (gt_polys is None and gt_poly_lens is None
-                    and num_polys_per_mask is None)
+            return self.test(img, img_meta, proposals, rescale)
         else:
-            assert (gt_polys is not None and gt_poly_lens is not None
-                    and num_polys_per_mask is not None)
-            gt_polys = split_combined_gt_polys(gt_polys, gt_poly_lens,
-                                               num_polys_per_mask)
-
-        if self.rpn_train_cfg.get('debug', False):
-            self.rpn_head.debug_imgs = tensor2imgs(img)
-        if self.rcnn_train_cfg.get('debug', False):
-            self.bbox_head.debug_imgs = tensor2imgs(img)
-            if self.mask_head is not None:
-                self.mask_head.debug_imgs = tensor2imgs(img)
-
-        img_shapes = img_meta['shape_scale']
+            losses = dict()
+
+        img_shapes = img_meta['img_shape']

         x = self.backbone(img)
-        if self.neck is not None:
+        if self.with_neck:
             x = self.neck(x)

-        rpn_outs = self.rpn_head(x)
-        proposal_inputs = rpn_outs + (img_shapes, self.rpn_test_cfg)
-        proposal_list = self.rpn_head.get_proposals(*proposal_inputs)
-
-        (pos_inds, neg_inds, pos_proposals, neg_proposals,
-         pos_assigned_gt_inds, pos_gt_bboxes, pos_gt_labels) = bbox_sampling(
-             proposal_list, gt_bboxes, gt_ignore, gt_labels,
-             self.rcnn_train_cfg)
-
-        labels, label_weights, bbox_targets, bbox_weights = \
-            self.bbox_head.proposal_target(
-                pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels,
-                self.rcnn_train_cfg)
-
-        rois = bbox2roi([
-            torch.cat([pos, neg], dim=0)
-            for pos, neg in zip(pos_proposals, neg_proposals)
-        ])
-        # TODO: a more flexible way to configurate feat maps
-        roi_feats = self.bbox_roi_extractor(
-            x[:self.bbox_roi_extractor.num_inputs], rois)
-        cls_score, bbox_pred = self.bbox_head(roi_feats)
-
-        losses = dict()
-        rpn_loss_inputs = rpn_outs + (gt_bboxes, img_shapes,
-                                      self.rpn_train_cfg)
-        rpn_losses = self.rpn_head.loss(*rpn_loss_inputs)
-        losses.update(rpn_losses)
-
-        loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, labels,
-                                        label_weights, bbox_targets,
-                                        bbox_weights)
-        losses.update(loss_bbox)
+        if self.with_rpn:
+            rpn_outs = self.rpn_head(x)
+            rpn_loss_inputs = rpn_outs + (gt_bboxes, img_shapes,
+                                          self.rpn_train_cfg)
+            rpn_losses = self.rpn_head.loss(*rpn_loss_inputs)
+            losses.update(rpn_losses)
+
+        if self.with_bbox:
+            if self.with_rpn:
+                proposal_inputs = rpn_outs + (img_shapes, self.rpn_test_cfg)
+                proposal_list = self.rpn_head.get_proposals(*proposal_inputs)
+            else:
+                proposal_list = proposals
+
+            (pos_inds, neg_inds, pos_proposals, neg_proposals,
+             pos_assigned_gt_inds,
+             pos_gt_bboxes, pos_gt_labels) = sample_proposals(
+                 proposal_list, gt_bboxes, gt_bboxes_ignore, gt_labels,
+                 self.rcnn_train_cfg)
+
+            labels, label_weights, bbox_targets, bbox_weights = \
+                self.bbox_head.get_bbox_target(
+                    pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels,
+                    self.rcnn_train_cfg)
+
+            rois = bbox2roi([
+                torch.cat([pos, neg], dim=0)
+                for pos, neg in zip(pos_proposals, neg_proposals)
+            ])
+            # TODO: a more flexible way to configurate feat maps
+            roi_feats = self.bbox_roi_extractor(
+                x[:self.bbox_roi_extractor.num_inputs], rois)
+            cls_score, bbox_pred = self.bbox_head(roi_feats)
+
+            loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, labels,
+                                            label_weights, bbox_targets,
+                                            bbox_weights)
+            losses.update(loss_bbox)

         if self.with_mask:
-            mask_targets = self.mask_head.mask_target(
-                pos_proposals, pos_assigned_gt_inds, gt_polys, img_shapes,
+            gt_polys = split_combined_gt_polys(gt_mask_polys, gt_poly_lens,
+                                               num_polys_per_mask)
+            mask_targets = self.mask_head.get_mask_target(
+                pos_proposals, pos_assigned_gt_inds, gt_polys, img_meta,
                 self.rcnn_train_cfg)
             pos_rois = bbox2roi(pos_proposals)
             mask_feats = self.mask_roi_extractor(
@@ -142,36 +146,40 @@ class TwoStageDetector(nn.Module):
                 torch.cat(pos_gt_labels))

         return losses

-    def test(self, imgs, img_metas, rescale=False):
+    def test(self, imgs, img_metas, proposals=None, rescale=False):
         """Test w/ or w/o augmentations."""
         assert isinstance(imgs, list) and isinstance(img_metas, list)
         assert len(imgs) == len(img_metas)
         img_per_gpu = imgs[0].size(0)
         assert img_per_gpu == 1
         if len(imgs) == 1:
-            return self.simple_test(imgs[0], img_metas[0], rescale)
+            return self.simple_test(imgs[0], img_metas[0], proposals, rescale)
         else:
-            return self.aug_test(imgs, img_metas, rescale)
+            return self.aug_test(imgs, img_metas, proposals, rescale)

-    def simple_test_bboxes(self, x, img_meta, rescale=False):
-        """Test only det bboxes without augmentation."""
-        img_shapes = img_meta['shape_scale']
+    def simple_test_rpn(self, x, img_meta):
+        img_shapes = img_meta['img_shape']
+        scale_factor = img_meta['scale_factor']
         rpn_outs = self.rpn_head(x)
         proposal_inputs = rpn_outs + (img_shapes, self.rpn_test_cfg)
-        proposal_list = self.rpn_head.get_proposals(*proposal_inputs)
-
-        rois = bbox2roi(proposal_list)
+        proposal_list = self.rpn_head.get_proposals(*proposal_inputs)[0]
+        return proposal_list
+
+    def simple_test_bboxes(self, x, img_meta, proposals, rescale=False):
+        """Test only det bboxes without augmentation."""
+        rois = bbox2roi(proposals)
         roi_feats = self.bbox_roi_extractor(
             x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
         cls_score, bbox_pred = self.bbox_head(roi_feats)
         # image shape of the first image in the batch (only one)
-        img_shape = img_shapes[0]
+        img_shape = img_meta['img_shape'][0]
+        scale_factor = img_meta['scale_factor']
         det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
             rois,
             cls_score,
             bbox_pred,
             img_shape,
+            scale_factor,
             rescale=rescale,
             nms_cfg=self.rcnn_test_cfg)
         return det_bboxes, det_labels
@@ -183,41 +191,52 @@ class TwoStageDetector(nn.Module):
                          det_labels,
                          rescale=False):
         # image shape of the first image in the batch (only one)
-        img_shape = img_meta['shape_scale'][0]
+        img_shape = img_meta['img_shape'][0]
+        scale_factor = img_meta['scale_factor']
         if det_bboxes.shape[0] == 0:
             segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
         else:
             # if det_bboxes is rescaled to the original image size, we need to
             # rescale it back to the testing scale to obtain RoIs.
-            _bboxes = (det_bboxes[:, :4] * img_shape[-1]
+            _bboxes = (det_bboxes[:, :4] * scale_factor.float()
                        if rescale else det_bboxes)
             mask_rois = bbox2roi([_bboxes])
             mask_feats = self.mask_roi_extractor(
                 x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois)
             mask_pred = self.mask_head(mask_feats)
             segm_result = self.mask_head.get_seg_masks(
-                mask_pred, det_bboxes, det_labels, img_shape,
-                self.rcnn_test_cfg, rescale)
+                mask_pred,
+                det_bboxes,
+                det_labels,
+                self.rcnn_test_cfg,
+                ori_scale=img_meta['ori_shape'])
         return segm_result

-    def simple_test(self, img, img_meta, rescale=False):
+    def simple_test(self, img, img_meta, proposals=None, rescale=False):
         """Test without augmentation."""
         # get feature maps
         x = self.backbone(img)
-        if self.neck is not None:
+        if self.with_neck:
             x = self.neck(x)
-        det_bboxes, det_labels = self.simple_test_bboxes(
-            x, img_meta, rescale=rescale)
-        bbox_result = bbox2result(det_bboxes, det_labels,
-                                  self.bbox_head.num_classes)
-        if not self.with_mask:
-            return bbox_result
-
-        segm_result = self.simple_test_mask(
-            x, img_meta, det_bboxes, det_labels, rescale=rescale)
-
-        return bbox_result, segm_result
+        if self.with_rpn:
+            proposals = self.simple_test_rpn(x, img_meta)
+        if self.with_bbox:
+            # BUG proposals shape?
+            det_bboxes, det_labels = self.simple_test_bboxes(
+                x, img_meta, [proposals], rescale=rescale)
+            bbox_result = bbox2result(det_bboxes, det_labels,
+                                      self.bbox_head.num_classes)
+            if not self.with_mask:
+                return bbox_result
+            segm_result = self.simple_test_mask(
+                x, img_meta, det_bboxes, det_labels, rescale=rescale)
+            return bbox_result, segm_result
+        else:
+            proposals[:, :4] /= img_meta['scale_factor'].float()
+            return proposals.cpu().numpy()

+    # TODO aug test haven't been verified
     def aug_test_bboxes(self, imgs, img_metas):
         """Test with augmentations for det bboxes."""
         # step 1: get RPN proposals for augmented images, apply NMS to the
...
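
# Summary of the with_rpn/with_bbox/with_mask branches above: the unified
# Detector is config-driven. A full two-stage detector passes backbone, neck,
# rpn_head, roi_block and bbox_head (plus mask_block/mask_head for instance
# segmentation); a Fast R-CNN-style model omits rpn_head and feeds
# precomputed `proposals` to forward(); an RPN-only model omits bbox_head and
# returns rescaled proposals directly.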

import torch.nn as nn

from mmdet.core import tensor2imgs, merge_aug_proposals, bbox_mapping
from .. import builder


class RPN(nn.Module):

    def __init__(self,
                 backbone,
                 neck,
                 rpn_head,
                 rpn_train_cfg,
                 rpn_test_cfg,
                 pretrained=None):
        super(RPN, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck) if neck is not None else None
        self.rpn_head = builder.build_rpn_head(rpn_head)
        self.rpn_train_cfg = rpn_train_cfg
        self.rpn_test_cfg = rpn_test_cfg
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        if pretrained is not None:
            print('load model from: {}'.format(pretrained))
        self.backbone.init_weights(pretrained=pretrained)
        if self.neck is not None:
            self.neck.init_weights()
        self.rpn_head.init_weights()

    def forward(self,
                img,
                img_meta,
                gt_bboxes=None,
                return_loss=True,
                return_bboxes=False,
                rescale=False):
        if not return_loss:
            return self.test(img, img_meta, rescale)

        img_shapes = img_meta['shape_scale']

        if self.rpn_train_cfg.get('debug', False):
            self.rpn_head.debug_imgs = tensor2imgs(img)

        x = self.backbone(img)
        if self.neck is not None:
            x = self.neck(x)
        rpn_outs = self.rpn_head(x)

        rpn_loss_inputs = rpn_outs + (gt_bboxes, img_shapes,
                                      self.rpn_train_cfg)
        losses = self.rpn_head.loss(*rpn_loss_inputs)
        return losses

    def test(self, imgs, img_metas, rescale=False):
        """Test w/ or w/o augmentations."""
        assert isinstance(imgs, list) and isinstance(img_metas, list)
        assert len(imgs) == len(img_metas)
        img_per_gpu = imgs[0].size(0)
        assert img_per_gpu == 1
        if len(imgs) == 1:
            return self.simple_test(imgs[0], img_metas[0], rescale)
        else:
            return self.aug_test(imgs, img_metas, rescale)

    def simple_test(self, img, img_meta, rescale=False):
        img_shapes = img_meta['shape_scale']
        # get feature maps
        x = self.backbone(img)
        if self.neck is not None:
            x = self.neck(x)
        rpn_outs = self.rpn_head(x)
        proposal_inputs = rpn_outs + (img_shapes, self.rpn_test_cfg)
        proposals = self.rpn_head.get_proposals(*proposal_inputs)[0]
        if rescale:
            proposals[:, :4] /= img_shapes[0][-1]
        return proposals.cpu().numpy()

    def aug_test(self, imgs, img_metas, rescale=False):
        aug_proposals = []
        for img, img_meta in zip(imgs, img_metas):
            x = self.backbone(img)
            if self.neck is not None:
                x = self.neck(x)
            rpn_outs = self.rpn_head(x)
            proposal_inputs = rpn_outs + (img_meta['shape_scale'],
                                          self.rpn_test_cfg)
            proposal_list = self.rpn_head.get_proposals(*proposal_inputs)
            assert len(proposal_list) == 1
            aug_proposals.append(proposal_list[0])  # len(proposal_list) = 1
        merged_proposals = merge_aug_proposals(aug_proposals, img_metas,
                                               self.rpn_test_cfg)
        if not rescale:
            img_shape = img_metas[0]['shape_scale'][0]
            flip = img_metas[0]['flip'][0]
            merged_proposals[:, :4] = bbox_mapping(merged_proposals[:, :4],
                                                   img_shape, flip)
        return merged_proposals.cpu().numpy()
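
# Hypothetical single-image test call for the RPN model above (inputs come
# from the test data pipeline; names are placeholders):
#   proposals = rpn_model(img=[img_tensor], img_meta=[img_meta],
#                         return_loss=False, rescale=True)
#   # -> ndarray of shape (num_proposals, 5): (x1, y1, x2, y2, score)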

@@ -3,10 +3,9 @@ import numpy as np
 import pycocotools.mask as mask_util
 import torch
 import torch.nn as nn
-import torch.utils.checkpoint as cp

-from ..common import ConvModule
-from mmdet.core import mask_target, mask_cross_entropy
+from ..utils import ConvModule
+from mmdet.core import mask_cross_entropy, mask_target


 class FCNMaskHead(nn.Module):
@@ -21,7 +20,6 @@ class FCNMaskHead(nn.Module):
                  upsample_ratio=2,
                  num_classes=81,
                  class_agnostic=False,
-                 with_cp=False,
                  normalize=None):
         super(FCNMaskHead, self).__init__()
         if upsample_method not in [None, 'deconv', 'nearest', 'bilinear']:
@@ -39,7 +37,6 @@ class FCNMaskHead(nn.Module):
         self.class_agnostic = class_agnostic
         self.normalize = normalize
         self.with_bias = normalize is None
-        self.with_cp = with_cp

         self.convs = nn.ModuleList()
         for i in range(self.num_convs):
@@ -79,25 +76,9 @@ class FCNMaskHead(nn.Module):
                     m.weight, mode='fan_out', nonlinearity='relu')
                 nn.init.constant_(m.bias, 0)

-    def convs_forward(self, x):
-
-        def m_lvl_convs_forward(x):
-            for conv in self.convs[1:-1]:
-                x = conv(x)
-            return x
-
-        if self.num_convs > 0:
-            x = self.convs[0](x)
-            if self.num_convs > 1:
-                if self.with_cp and x.requires_grad:
-                    x = cp.checkpoint(m_lvl_convs_forward, x)
-                else:
-                    x = m_lvl_convs_forward(x)
-            x = self.convs[-1](x)
-        return x
-
     def forward(self, x):
-        x = self.convs_forward(x)
+        for conv in self.convs:
+            x = conv(x)
         if self.upsample is not None:
             x = self.upsample(x)
             if self.upsample_method == 'deconv':
@@ -105,24 +86,18 @@ class FCNMaskHead(nn.Module):
         mask_pred = self.conv_logits(x)
         return mask_pred

-    def mask_target(self, pos_proposals, pos_assigned_gt_inds, gt_masks,
-                    img_shapes, rcnn_train_cfg):
+    def get_mask_target(self, pos_proposals, pos_assigned_gt_inds, gt_masks,
+                        img_meta, rcnn_train_cfg):
         mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
-                                   gt_masks, img_shapes, rcnn_train_cfg)
+                                   gt_masks, img_meta, rcnn_train_cfg)
         return mask_targets

     def loss(self, mask_pred, mask_targets, labels):
         loss_mask = mask_cross_entropy(mask_pred, mask_targets, labels)
         return loss_mask

-    def get_seg_masks(self,
-                      mask_pred,
-                      det_bboxes,
-                      det_labels,
-                      img_shape,
-                      rcnn_test_cfg,
-                      ori_scale,
-                      rescale=True):
+    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
+                      ori_scale):
         """Get segmentation masks from mask_pred and bboxes

         Args:
             mask_pred (Tensor or ndarray): shape (n, #class+1, h, w).
@@ -143,14 +118,11 @@ class FCNMaskHead(nn.Module):
         cls_segms = [[] for _ in range(self.num_classes - 1)]
         bboxes = det_bboxes.cpu().numpy()[:, :4]
         labels = det_labels.cpu().numpy() + 1
-        scale_factor = img_shape[-1] if rescale else 1.0
-        img_h = ori_scale['height'] if rescale else np.round(
-            ori_scale['height'].item() * img_shape[-1].item()).astype(np.int32)
-        img_w = ori_scale['width'] if rescale else np.round(
-            ori_scale['width'].item() * img_shape[-1].item()).astype(np.int32)
+        img_h = ori_scale[0]
+        img_w = ori_scale[1]

         for i in range(bboxes.shape[0]):
-            bbox = (bboxes[i, :] / float(scale_factor)).astype(int)
+            bbox = bboxes[i, :].astype(int)
             label = labels[i]
             w = bbox[2] - bbox[0] + 1
             h = bbox[3] - bbox[1] + 1
@@ -164,7 +136,7 @@ class FCNMaskHead(nn.Module):
             im_mask = np.zeros((img_h, img_w), dtype=np.float32)
-            im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = mmcv.resize(
+            im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = mmcv.imresize(
                 mask_pred_, (w, h))
             # im_mask = cv2.resize(im_mask, (img_w, img_h))
             im_mask = np.array(
...

 import torch.nn as nn
 import torch.nn.functional as F

-from ..common import ConvModule
-from ..weight_init import xavier_init
+from ..utils import ConvModule
+from ..utils import xavier_init


 class FPN(nn.Module):
...

@@ -9,8 +9,8 @@ from mmdet.core import (AnchorGenerator, anchor_target, bbox_transform_inv,
                         weighted_cross_entropy, weighted_smoothl1,
                         weighted_binary_cross_entropy)
 from mmdet.ops import nms
-from ..misc import multi_apply
-from ..weight_init import normal_init
+from ..utils import multi_apply
+from ..utils import normal_init


 class RPNHead(nn.Module):
...

 from .conv_module import ConvModule
 from .norm import build_norm_layer
+from .misc import *
+from .weight_init import *

 __all__ = ['ConvModule', 'build_norm_layer']