Commit 97d08556 authored by Jiangmiao Pang, committed by Kai Chen
Browse files

Reformat use yapf 0.27.0 & Add yapf style file (#672)

* Reformat use yapf 0.27.0

* Add yapf style file
parent 4bbb4a2a
[style]
BASED_ON_STYLE = pep8
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
......@@ -11,9 +11,7 @@ model = dict(
frozen_stages=1,
style='pytorch',
dcn=dict(
modulated=False,
deformable_groups=1,
fallback_on_stride=False),
modulated=False, deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
neck=dict(
type='FPN',
......
......@@ -11,9 +11,7 @@ model = dict(
frozen_stages=1,
style='pytorch',
dcn=dict(
modulated=False,
deformable_groups=1,
fallback_on_stride=False),
modulated=False, deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
neck=dict(
type='FPN',
......
......@@ -10,9 +10,7 @@ model = dict(
frozen_stages=1,
style='pytorch',
dcn=dict(
modulated=False,
deformable_groups=1,
fallback_on_stride=False),
modulated=False, deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
neck=dict(
type='FPN',
......
......@@ -10,9 +10,7 @@ model = dict(
frozen_stages=1,
style='pytorch',
dcn=dict(
modulated=True,
deformable_groups=1,
fallback_on_stride=False),
modulated=True, deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
neck=dict(
type='FPN',
......
......@@ -10,9 +10,7 @@ model = dict(
frozen_stages=1,
style='pytorch',
dcn=dict(
modulated=False,
deformable_groups=1,
fallback_on_stride=False),
modulated=False, deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
neck=dict(
type='FPN',
......
......@@ -99,9 +99,7 @@ test_cfg = dict(
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100))
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
......
......@@ -124,8 +124,7 @@ def show_result(img, result, class_names, score_thr=0.3, out_file=None):
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
......
......@@ -91,8 +91,8 @@ def build_optimizer(model, optimizer_cfg):
paramwise_options = optimizer_cfg.pop('paramwise_options', None)
# if no paramwise option is specified, just use the global setting
if paramwise_options is None:
return obj_from_dict(optimizer_cfg, torch.optim,
dict(params=model.parameters()))
return obj_from_dict(
optimizer_cfg, torch.optim, dict(params=model.parameters()))
else:
assert isinstance(paramwise_options, dict)
# get base lr and weight decay
......
......@@ -7,8 +7,7 @@ def build_assigner(cfg, **kwargs):
if isinstance(cfg, assigners.BaseAssigner):
return cfg
elif isinstance(cfg, dict):
return mmcv.runner.obj_from_dict(
cfg, assigners, default_args=kwargs)
return mmcv.runner.obj_from_dict(cfg, assigners, default_args=kwargs)
else:
raise TypeError('Invalid type {} for building a sampler'.format(
type(cfg)))
......@@ -18,8 +17,7 @@ def build_sampler(cfg, **kwargs):
if isinstance(cfg, samplers.BaseSampler):
return cfg
elif isinstance(cfg, dict):
return mmcv.runner.obj_from_dict(
cfg, samplers, default_args=kwargs)
return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs)
else:
raise TypeError('Invalid type {} for building a sampler'.format(
type(cfg)))
......
......@@ -261,8 +261,8 @@ def eval_map(det_results,
cls_dets, cls_gts, cls_gt_ignore = get_cls_results(
det_results, gt_bboxes, gt_labels, gt_ignore, i)
# calculate tp and fp for each image
tpfp_func = (tpfp_imagenet
if dataset in ['det', 'vid'] else tpfp_default)
tpfp_func = (
tpfp_imagenet if dataset in ['det', 'vid'] else tpfp_default)
tpfp = [
tpfp_func(cls_dets[j], cls_gts[j], cls_gt_ignore[j], iou_thr,
area_ranges) for j in range(len(cls_dets))
......
......@@ -45,9 +45,8 @@ def multiclass_nms(multi_bboxes,
_scores *= score_factors[cls_inds]
cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)
cls_dets, _ = nms_op(cls_dets, **nms_cfg_)
cls_labels = multi_bboxes.new_full((cls_dets.shape[0], ),
i - 1,
dtype=torch.long)
cls_labels = multi_bboxes.new_full(
(cls_dets.shape[0], ), i - 1, dtype=torch.long)
bboxes.append(cls_dets)
labels.append(cls_labels)
if bboxes:
......
......@@ -91,9 +91,7 @@ class Expand(object):
class RandomCrop(object):
def __init__(self,
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3):
def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3):
# 1: return ori img
self.sample_mode = (1, *min_ious, 0)
self.min_crop_size = min_crop_size
......
from .build_loader import build_dataloader
from .sampler import GroupSampler, DistributedGroupSampler
__all__ = [
'GroupSampler', 'DistributedGroupSampler', 'build_dataloader'
]
__all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader']
......@@ -25,10 +25,8 @@ def build_dataloader(dataset,
sampler = DistributedGroupSampler(dataset, imgs_per_gpu,
world_size, rank)
else:
sampler = DistributedSampler(dataset,
world_size,
rank,
shuffle=False)
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=False)
batch_size = imgs_per_gpu
num_workers = workers_per_gpu
else:
......@@ -36,13 +34,13 @@ def build_dataloader(dataset,
batch_size = num_gpus * imgs_per_gpu
num_workers = num_gpus * workers_per_gpu
data_loader = DataLoader(dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate,
samples_per_gpu=imgs_per_gpu),
pin_memory=False,
**kwargs)
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
pin_memory=False,
**kwargs)
return data_loader
......@@ -139,8 +139,8 @@ class DistributedGroupSampler(Sampler):
indices = [
indices[j] for i in list(
torch.randperm(len(indices) // self.samples_per_gpu,
generator=g))
torch.randperm(
len(indices) // self.samples_per_gpu, generator=g))
for j in range(i * self.samples_per_gpu, (i + 1) *
self.samples_per_gpu)
]
......
......@@ -34,8 +34,8 @@ class ImageTransform(object):
else:
img, w_scale, h_scale = mmcv.imresize(
img, scale, return_scale=True)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
scale_factor = np.array(
[w_scale, h_scale, w_scale, h_scale], dtype=np.float32)
img_shape = img.shape
img = mmcv.imnormalize(img, self.mean, self.std, self.to_rgb)
if flip:
......
......@@ -196,8 +196,9 @@ class AnchorHead(nn.Module):
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (num_total_pos if self.cls_focal_loss else
num_total_pos + num_total_neg)
num_total_samples = (
num_total_pos
if self.cls_focal_loss else num_total_pos + num_total_neg)
losses_cls, losses_reg = multi_apply(
self.loss_single,
cls_scores,
......
......@@ -43,12 +43,7 @@ class BasicBlock(nn.Module):
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
planes,
planes,
3,
padding=1,
bias=False)
conv_cfg, planes, planes, 3, padding=1, bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
......
......@@ -94,8 +94,8 @@ class ConvFCBBoxHead(BBoxHead):
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (last_layer_dim
if i == 0 else self.conv_out_channels)
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
......@@ -114,8 +114,8 @@ class ConvFCBBoxHead(BBoxHead):
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= (self.roi_feat_size * self.roi_feat_size)
for i in range(num_branch_fcs):
fc_in_channels = (last_layer_dim
if i == 0 else self.fc_out_channels)
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
......
Markdown is supported
Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment