"doc/vscode:/vscode.git/clone" did not exist on "59bcec8edf2b7fb6fa86baa0e2d561a838730e8a"
Commit f1c79392 authored by Wenwei Zhang, committed by Kai Chen

reformat for yapf0.29 (#1728)

* reformat for yapf 0.29

* fix conflicts between yapf and flake8

* flake8 takes priority over yapf where the two conflict (see the note below)
parent 125308dd
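Where yapf 0.29's preferred wrapping collides with flake8 (typically the line-length check) or would undo a deliberate manual layout, the hand-wrapped line is kept and opted out of yapf with a trailing "# yapf: disable" marker, as done for GuidedAnchorHead.__init__ and _demo_mm_inputs in the hunks below. A minimal sketch of the marker, using a made-up function rather than code from the repository:

# Minimal sketch (not from the repository): a signature whose manual
# wrapping satisfies flake8 and is protected from future yapf runs.
def _demo(input_shape=(1, 3, 300, 300),
          num_items=None, num_classes=10):  # yapf: disable
    return input_shape, num_items, num_classes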
@@ -92,6 +92,7 @@ class AnchorGenerator(object):
         valid_y[:valid_h] = 1
         valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
         valid = valid_xx & valid_yy
-        valid = valid[:, None].expand(
-            valid.size(0), self.num_base_anchors).contiguous().view(-1)
+        valid = valid[:,
+                      None].expand(valid.size(0),
+                                   self.num_base_anchors).contiguous().view(-1)
         return valid
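The reformatted expression above changes only the layout; it still broadcasts each location's valid flag to every base anchor generated at that location. A standalone sketch with made-up sizes:

import torch

num_base_anchors = 3                                   # hypothetical value
valid = torch.tensor([1, 1, 0, 1], dtype=torch.uint8)  # per-location flags

# Add a singleton dim, repeat per base anchor, then flatten back to 1-D.
per_anchor = valid[:, None].expand(valid.size(0),
                                   num_base_anchors).contiguous().view(-1)
print(per_anchor.tolist())  # [1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1]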
@@ -159,7 +159,9 @@ def anchor_target_single(flat_anchors,
                 neg_inds)


-def anchor_inside_flags(flat_anchors, valid_flags, img_shape,
+def anchor_inside_flags(flat_anchors,
+                        valid_flags,
+                        img_shape,
                         allowed_border=0):
     img_h, img_w = img_shape[:2]
     if allowed_border >= 0:
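Only the signature of anchor_inside_flags appears in this hunk; its body is elided. A hedged sketch of the kind of border check such a function performs (an illustration, not the exact mmdetection implementation):

import torch


def anchor_inside_flags_sketch(flat_anchors, valid_flags, img_shape,
                               allowed_border=0):
    # Keep anchors whose (x1, y1, x2, y2) fall inside the image, optionally
    # padded by allowed_border; a negative border disables the check.
    img_h, img_w = img_shape[:2]
    if allowed_border >= 0:
        inside_flags = (valid_flags
                        & (flat_anchors[:, 0] >= -allowed_border)
                        & (flat_anchors[:, 1] >= -allowed_border)
                        & (flat_anchors[:, 2] < img_w + allowed_border)
                        & (flat_anchors[:, 3] < img_h + allowed_border))
    else:
        inside_flags = valid_flags
    return inside_flags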
@@ -94,12 +94,12 @@ def ga_loc_target(gt_bboxes_list,
             # calculate positive (center) regions
             ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region(
                 gt_, r1, featmap_sizes[lvl])
-            all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 +
-                                 1] = 1
-            all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 +
-                                 1, ignore_x1:ignore_x2 + 1] = 0
-            all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 +
-                                 1] = 1
+            all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
+                                 ctr_x1:ctr_x2 + 1] = 1
+            all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
+                                 ignore_x1:ignore_x2 + 1] = 0
+            all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
+                                 ctr_x1:ctr_x2 + 1] = 1
             # calculate ignore map on nearby low level feature
             if lvl > 0:
                 d_lvl = lvl - 1
@@ -107,8 +107,8 @@ def ga_loc_target(gt_bboxes_list,
                 gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]
                 ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
                     gt_, r2, featmap_sizes[d_lvl])
-                all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 +
-                                      1, ignore_x1:ignore_x2 + 1] = 1
+                all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
+                                      ignore_x1:ignore_x2 + 1] = 1
             # calculate ignore map on nearby high level feature
             if lvl < num_lvls - 1:
                 u_lvl = lvl + 1
@@ -116,8 +116,8 @@ def ga_loc_target(gt_bboxes_list,
                 gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]
                 ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
                     gt_, r2, featmap_sizes[u_lvl])
-                all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 +
-                                      1, ignore_x1:ignore_x2 + 1] = 1
+                all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
+                                      ignore_x1:ignore_x2 + 1] = 1
     for lvl_id in range(num_lvls):
         # ignore negative regions w.r.t. ignore map
         all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0)
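The ga_loc_target hunks above only re-wrap the slice expressions; the underlying pattern is writing a rectangular region returned by calc_region into a per-level target map, with inclusive upper bounds (hence the + 1). A toy illustration with made-up bounds:

import torch

# One image, one channel, an 8x8 feature level (sizes chosen for the example).
loc_target = torch.zeros(1, 1, 8, 8)
loc_weight = torch.full((1, 1, 8, 8), -1.0)

# Pretend calc_region() returned these inclusive center-region bounds.
ctr_x1, ctr_y1, ctr_x2, ctr_y2 = 2, 3, 4, 5

loc_target[0, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1
loc_weight[0, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1
print(int(loc_target.sum()))  # 9: a 3x3 block of positive locations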
@@ -370,8 +370,8 @@ class RandomCrop(object):
         if 'gt_masks' in results:
             valid_gt_masks = []
             for i in np.where(valid_inds)[0]:
-                gt_mask = results['gt_masks'][i][crop_y1:crop_y2, crop_x1:
-                                                 crop_x2]
+                gt_mask = results['gt_masks'][i][crop_y1:crop_y2,
+                                                 crop_x1:crop_x2]
                 valid_gt_masks.append(gt_mask)
             results['gt_masks'] = valid_gt_masks
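The RandomCrop hunk likewise only re-wraps the slice; each kept instance mask is cropped with the same window already applied to the image. A small numpy sketch with a made-up crop window:

import numpy as np

gt_mask = np.zeros((10, 10), dtype=np.uint8)
gt_mask[4:8, 4:8] = 1                             # a toy instance mask

crop_y1, crop_y2, crop_x1, crop_x2 = 2, 9, 3, 9   # hypothetical crop window
cropped = gt_mask[crop_y1:crop_y2, crop_x1:crop_x2]
print(cropped.shape, int(cropped.sum()))          # (7, 6) 16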
@@ -206,7 +206,11 @@ class AnchorHead(nn.Module):
         return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)

     @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
-    def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg,
+    def get_bboxes(self,
+                   cls_scores,
+                   bbox_preds,
+                   img_metas,
+                   cfg,
                    rescale=False):
         """
         Transform network output for a batch into labeled boxes.
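For context on the decorator kept above the re-wrapped get_bboxes signature: force_fp32 (imported from mmdet.core in this codebase) casts the listed tensor arguments back to float32 before the method body runs when the module has fp16 enabled. A hedged usage sketch with a toy module that is not part of mmdetection:

import torch
import torch.nn as nn
from mmdet.core import force_fp32


class TinyHead(nn.Module):
    # Toy module, only here to demonstrate the decorator.

    def __init__(self):
        super(TinyHead, self).__init__()
        self.fp16_enabled = False  # flipped to True by the fp16 training hooks

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def get_bboxes(self, cls_scores, bbox_preds):
        # With fp16 enabled, both lists arrive here cast back to float32.
        return cls_scores[0].dtype, bbox_preds[0].dtype


head = TinyHead()
scores = [torch.randn(1, 3, 4, 4)]
preds = [torch.randn(1, 12, 4, 4)]
print(head.get_bboxes(scores, preds))  # (torch.float32, torch.float32)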
@@ -94,31 +94,32 @@ class GuidedAnchorHead(AnchorHead):
     """

     def __init__(
             self,
             num_classes,
             in_channels,
             feat_channels=256,
             octave_base_scale=8,
             scales_per_octave=3,
             octave_ratios=[0.5, 1.0, 2.0],
             anchor_strides=[4, 8, 16, 32, 64],
             anchor_base_sizes=None,
             anchoring_means=(.0, .0, .0, .0),
             anchoring_stds=(1.0, 1.0, 1.0, 1.0),
             target_means=(.0, .0, .0, .0),
             target_stds=(1.0, 1.0, 1.0, 1.0),
             deformable_groups=4,
             loc_filter_thr=0.01,
             loss_loc=dict(
                 type='FocalLoss',
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 loss_weight=1.0),
             loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
             loss_cls=dict(
                 type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
-            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
+                           loss_weight=1.0)):  # yapf: disable
         super(AnchorHead, self).__init__()
         self.in_channels = in_channels
         self.num_classes = num_classes
@@ -209,7 +209,10 @@ class GuidedAnchorHead(AnchorHead):

     def forward(self, feats):
         return multi_apply(self.forward_single, feats)

-    def get_sampled_approxs(self, featmap_sizes, img_metas, cfg,
+    def get_sampled_approxs(self,
+                            featmap_sizes,
+                            img_metas,
+                            cfg,
                             device='cuda'):
         """Get sampled approxs and inside flags according to feature map sizes.
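The GuidedAnchorHead hunks change only the wrapping of the constructor signature (plus the # yapf: disable noted in the commit message). The dict-style loss arguments are the usual mmdetection config pattern; a hedged sketch of how such dicts are typically turned into loss modules via the registry (build_loss is the repository's builder helper; the exact call sites are outside these hunks):

from mmdet.models import build_loss

# Configs copied from the signature above.
loss_loc_cfg = dict(
    type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0)
loss_bbox_cfg = dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)

loss_loc = build_loss(loss_loc_cfg)    # -> FocalLoss instance
loss_bbox = build_loss(loss_bbox_cfg)  # -> SmoothL1Loss instance
print(type(loss_loc).__name__, type(loss_bbox).__name__)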
@@ -152,8 +152,8 @@ class MaskScoringRCNN(TwoStageDetector):
             # mask iou head forward and loss
             pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
             mask_iou_pred = self.mask_iou_head(mask_feats, pos_mask_pred)
-            pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)
-                                                    ), pos_labels]
+            pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
+                                              pos_labels]
             mask_iou_targets = self.mask_iou_head.get_target(
                 sampling_results, gt_masks, pos_mask_pred, mask_targets,
                 self.train_cfg.rcnn)
@@ -193,8 +193,8 @@ class MaskScoringRCNN(TwoStageDetector):
                                                        rescale)
             # get mask scores with mask iou head
             mask_iou_pred = self.mask_iou_head(
-                mask_feats,
-                mask_pred[range(det_labels.size(0)), det_labels + 1])
+                mask_feats, mask_pred[range(det_labels.size(0)),
+                                      det_labels + 1])
             mask_scores = self.mask_iou_head.get_mask_scores(
                 mask_iou_pred, det_bboxes, det_labels)
         return segm_result, mask_scores
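The two MaskScoringRCNN hunks re-wrap the same indexing idiom: the mask prediction has one channel per class, and pred[range(N), labels] picks, for each of the N instances, the channel belonging to its own label. A toy illustration with made-up sizes:

import torch

num_instances, num_classes = 4, 5               # made-up sizes
pred = torch.randn(num_instances, num_classes)  # per-class predictions
labels = torch.tensor([2, 0, 4, 1])             # one label per instance

# Pair row i with column labels[i]: one prediction per instance.
selected = pred[range(num_instances), labels]
print(selected.shape)  # torch.Size([4])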
@@ -181,8 +181,8 @@ class MaskIoUHead(nn.Module):
         mask_score = bbox_score * mask_iou
         """
         inds = range(det_labels.size(0))
-        mask_scores = mask_iou_pred[inds, det_labels +
-                                    1] * det_bboxes[inds, -1]
+        mask_scores = mask_iou_pred[inds, det_labels + 1] * det_bboxes[inds,
+                                                                       -1]
         mask_scores = mask_scores.cpu().numpy()
         det_labels = det_labels.cpu().numpy()
         return [
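The MaskIoUHead hunk re-wraps the line that implements the formula quoted in its docstring, mask_score = bbox_score * mask_iou: the predicted mask IoU for each detection's class (offset by 1 for background) is multiplied by the detection's box score in the last bbox column. A toy version with made-up numbers:

import torch

det_labels = torch.tensor([0, 2])                     # two detections
mask_iou_pred = torch.tensor([[0.1, 0.9, 0.3, 0.2],
                              [0.2, 0.1, 0.4, 0.8]])  # per-class IoU preds
det_bboxes = torch.tensor([[0., 0., 10., 10., 0.9],   # x1, y1, x2, y2, score
                           [5., 5., 20., 20., 0.5]])

inds = range(det_labels.size(0))
mask_scores = mask_iou_pred[inds, det_labels + 1] * det_bboxes[inds, -1]
print(mask_scores)  # tensor([0.8100, 0.4000])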
@@ -120,17 +120,16 @@ class GeneralizedAttention(nn.Module):
                 (max_len, max_len, max_len_kv, max_len_kv), dtype=np.int)
             for iy in range(max_len):
                 for ix in range(max_len):
-                    local_constraint_map[iy, ix,
-                                         max((iy - self.spatial_range) //
-                                             self.kv_stride, 0):min(
-                                                 (iy + self.spatial_range +
-                                                  1) // self.kv_stride +
-                                                 1, max_len),
-                                         max((ix - self.spatial_range) //
-                                             self.kv_stride, 0):min(
-                                                 (ix + self.spatial_range +
-                                                  1) // self.kv_stride +
-                                                 1, max_len)] = 0
+                    local_constraint_map[
+                        iy, ix,
+                        max((iy - self.spatial_range) //
+                            self.kv_stride, 0):min((iy + self.spatial_range +
+                                                    1) // self.kv_stride +
+                                                   1, max_len),
+                        max((ix - self.spatial_range) //
+                            self.kv_stride, 0):min((ix + self.spatial_range +
+                                                    1) // self.kv_stride +
+                                                   1, max_len)] = 0

             self.local_constraint_map = nn.Parameter(
                 torch.from_numpy(local_constraint_map).byte(),
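The GeneralizedAttention hunk re-wraps the construction of local_constraint_map, which sets the entries of the key/value window around each query position (iy, ix) to 0; the hunk only changes how the slice bounds are wrapped, not the bounds themselves. Recomputing those bounds for a single position with hypothetical hyper-parameters:

# Mirrors the slice-bound expressions in the hunk; values are made up.
spatial_range, kv_stride, max_len = 10, 2, 42
iy, ix = 7, 30

y1 = max((iy - spatial_range) // kv_stride, 0)
y2 = min((iy + spatial_range + 1) // kv_stride + 1, max_len)
x1 = max((ix - spatial_range) // kv_stride, 0)
x2 = min((ix + spatial_range + 1) // kv_stride + 1, max_len)
print(y1, y2, x1, x2)  # 0 10 10 21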
@@ -168,8 +168,8 @@ def test_retina_ghm_forward():
         batch_results.append(result)


-def _demo_mm_inputs(
-        input_shape=(1, 3, 300, 300), num_items=None, num_classes=10):
+def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
+                    num_items=None, num_classes=10):  # yapf: disable
     """
     Create a superset of inputs needed to run test or train batches.