Commit c47c4480 authored by Kai Chen

fix flake8 error

parent bac4c32f
 from .version import __version__, short_version
+
+__all__ = ['__version__', 'short_version']
-from .rpn_ops import *
-from .bbox_ops import *
-from .mask_ops import *
-from .losses import *
-from .eval import *
-from .parallel import *
-from .post_processing import *
-from .utils import *
+from .rpn_ops import *  # noqa: F401, F403
+from .bbox_ops import *  # noqa: F401, F403
+from .mask_ops import *  # noqa: F401, F403
+from .losses import *  # noqa: F401, F403
+from .eval import *  # noqa: F401, F403
+from .parallel import *  # noqa: F401, F403
+from .post_processing import *  # noqa: F401, F403
+from .utils import *  # noqa: F401, F403
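For context on the hunk above: flake8's F401 flags imports that are never used, and F403 flags `from module import *` because it hides which names enter the namespace; both fire on package __init__ files that exist only to re-export names. The `# noqa` comments silence them where the wildcard re-export is intentional, while several hunks below take the stricter route of explicit imports plus `__all__`. A minimal sketch of that stricter pattern, with made-up module and function names (not names from this commit):

# Hypothetical package __init__.py; 'ops', 'add' and 'mul' are illustrative.
from .ops import add, mul

# Listing the names in __all__ documents the public API and marks the
# imports as used, so no noqa comment is needed.
__all__ = ['add', 'mul']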
+# flake8: noqa
 # This file is copied from Detectron.
 # Copyright (c) 2017-present, Facebook, Inc.
......
-from .anchor_generator import *
-from .anchor_target import *
+from .anchor_generator import AnchorGenerator
+from .anchor_target import anchor_target
+
+__all__ = ['AnchorGenerator', 'anchor_target']
......
@@ -38,7 +38,8 @@ def _init_dist_slurm(backend, **kwargs):
     raise NotImplementedError


-# modified from https://github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py#L9
+# modified from
+# https://github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py#L9
 def all_reduce_coalesced(tensors):
     buckets = OrderedDict()
     for tensor in tensors:
......
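The re-wrapped comment above is an E501 (line too long) fix. The function it documents coalesces gradient all-reduces in the manner of the linked apex code: tensors are bucketed by dtype, each bucket is flattened into one buffer, reduced with a single collective, and the results are copied back. A hedged sketch of that general technique (not the commit's exact body):

from collections import OrderedDict

import torch
import torch.distributed as dist


def all_reduce_coalesced_sketch(tensors):
    # Bucket by dtype: a flattened buffer must be homogeneous.
    buckets = OrderedDict()
    for tensor in tensors:
        buckets.setdefault(tensor.dtype, []).append(tensor)
    for bucket in buckets.values():
        # One collective per bucket instead of one per tensor.
        flat = torch._utils._flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        synced = torch._utils._unflatten_dense_tensors(flat, bucket)
        for tensor, s in zip(bucket, synced):
            tensor.copy_(s)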
 from .coco import CocoDataset
 from .loader import (collate, GroupSampler, DistributedGroupSampler,
                      build_dataloader)
 from .utils import DataContainer, to_tensor, random_scale, show_ann

-__all__ = ['CocoDataset']
+__all__ = [
+    'CocoDataset', 'collate', 'GroupSampler', 'DistributedGroupSampler',
+    'build_dataloader', 'DataContainer', 'to_tensor', 'random_scale',
+    'show_ann'
+]
......
@@ -117,7 +117,10 @@ class CocoDataset(Dataset):
         gt_bboxes = []
         gt_labels = []
         gt_bboxes_ignore = []
-        # each mask consists of one or several polys, each poly is a list of float.
+        # Two formats are provided.
+        # 1. mask: a binary map of the same size of the image.
+        # 2. polys: each mask consists of one or several polys, each poly is a
+        # list of float.
         if with_mask:
             gt_masks = []
             gt_mask_polys = []
......
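To make the two formats in that comment concrete, here is a hedged example using pycocotools (the standard decoder for COCO annotations; this snippet is not part of the diff):

import pycocotools.mask as mask_utils

# Format 2 (polys): one mask is a list of polygons, each polygon a flat
# list of floats [x1, y1, x2, y2, ...].
polys = [[10.0, 10.0, 40.0, 10.0, 40.0, 40.0, 10.0, 40.0]]

# Format 1 (mask): an HxW binary map, produced here by rasterizing the polys.
h, w = 64, 64
rles = mask_utils.frPyObjects(polys, h, w)
binary_mask = mask_utils.decode(mask_utils.merge(rles))  # uint8 array, (64, 64)
print(binary_mask.sum())  # pixel area of the polygon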
 from .data_container import DataContainer
-from .misc import *
+from .misc import to_tensor, random_scale, show_ann
+
+__all__ = ['DataContainer', 'to_tensor', 'random_scale', 'show_ann']
-from .detectors import *
-from .builder import *
+from .detectors import BaseDetector, RPN, FasterRCNN, MaskRCNN
+from .builder import (build_neck, build_rpn_head, build_roi_extractor,
+                      build_bbox_head, build_mask_head, build_detector)
+
+__all__ = [
+    'BaseDetector', 'RPN', 'FasterRCNN', 'MaskRCNN', 'build_backbone',
+    'build_neck', 'build_rpn_head', 'build_roi_extractor', 'build_bbox_head',
+    'build_mask_head', 'build_detector'
+]
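The build_* helpers re-exported above follow the config-driven builder pattern: a dict's 'type' key selects a class and the remaining keys become constructor arguments. A self-contained sketch of that pattern (toy names, not mmdet's actual builder code):

def build_from_cfg_sketch(cfg, module_dict):
    # Pop 'type' to pick the class; pass the remaining keys as kwargs.
    cfg = dict(cfg)
    cls = module_dict[cfg.pop('type')]
    return cls(**cfg)


class FakeDetector:
    def __init__(self, num_classes):
        self.num_classes = num_classes


model = build_from_cfg_sketch(dict(type='FakeDetector', num_classes=81),
                              {'FakeDetector': FakeDetector})
print(model.num_classes)  # 81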
 from .resnet import resnet
+
+__all__ = ['resnet']
......
@@ -43,17 +43,21 @@ class ConvFCRoIHead(BBoxHead):
         self.fc_out_channels = fc_out_channels

         # add shared convs and fcs
-        self.shared_convs, self.shared_fcs, last_layer_dim = self._add_conv_fc_branch(
-            self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)
+        self.shared_convs, self.shared_fcs, last_layer_dim = \
+            self._add_conv_fc_branch(
+                self.num_shared_convs, self.num_shared_fcs, self.in_channels,
+                True)
         self.shared_out_channels = last_layer_dim

         # add cls specific branch
-        self.cls_convs, self.cls_fcs, self.cls_last_dim = self._add_conv_fc_branch(
-            self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
+        self.cls_convs, self.cls_fcs, self.cls_last_dim = \
+            self._add_conv_fc_branch(
+                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)

         # add reg specific branch
-        self.reg_convs, self.reg_fcs, self.reg_last_dim = self._add_conv_fc_branch(
-            self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
+        self.reg_convs, self.reg_fcs, self.reg_last_dim = \
+            self._add_conv_fc_branch(
+                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)

         if self.num_shared_fcs == 0 and not self.with_avg_pool:
             if self.num_cls_fcs == 0:
......
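The wrapping above fixes flake8 E501 (line longer than 79 characters) with backslash continuations. PEP 8 generally prefers implicit continuation inside parentheses; a runnable toy comparison of the two styles (the function here is a stand-in, not mmdet's method):

def add_conv_fc_branch(num_convs, num_fcs, in_channels, is_shared=False):
    # Stand-in returning the same 3-tuple shape as the real method.
    return ['conv'] * num_convs, ['fc'] * num_fcs, in_channels


# Backslash continuation, as in the commit:
shared_convs, shared_fcs, last_layer_dim = \
    add_conv_fc_branch(0, 2, 256, True)

# Implicit continuation inside parentheses, the PEP 8-preferred style:
(shared_convs, shared_fcs,
 last_layer_dim) = add_conv_fc_branch(0, 2, 256, True)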
......
@@ -111,7 +111,8 @@ class FPN(nn.Module):
         ]
         # part 2: add extra levels
         if self.num_outs > len(outs):
-            # use max pool to get more levels on top of outputs (Faster R-CNN, Mask R-CNN)
+            # use max pool to get more levels on top of outputs
+            # (e.g., Faster R-CNN, Mask R-CNN)
             if not self.add_extra_convs:
                 for i in range(self.num_outs - used_backbone_levels):
                     outs.append(F.max_pool2d(outs[-1], 1, stride=2))
......
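For the hunk above: when the FPN must emit more levels than the backbone provides and no extra convs are configured, it repeatedly max-pools the top feature map with kernel size 1 and stride 2, which simply subsamples it. A runnable sketch of that behavior:

import torch
import torch.nn.functional as F

outs = [torch.randn(1, 256, s, s) for s in (64, 32, 16, 8)]  # backbone levels
num_outs, used_backbone_levels = 5, len(outs)
for _ in range(num_outs - used_backbone_levels):
    # kernel_size=1, stride=2: takes every other pixel, no pooling window
    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
print([o.shape[-1] for o in outs])  # [64, 32, 16, 8, 4]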
 from .conv_module import ConvModule
 from .norm import build_norm_layer
-from .weight_init import *
+from .weight_init import xavier_init, normal_init, uniform_init, kaiming_init

-__all__ = ['ConvModule', 'build_norm_layer']
+__all__ = [
+    'ConvModule', 'build_norm_layer', 'xavier_init', 'normal_init',
+    'uniform_init', 'kaiming_init'
+]
 from .nms import nms, soft_nms
 from .roi_align import RoIAlign, roi_align
 from .roi_pool import RoIPool, roi_pool
+
+__all__ = ['nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool']

 from .nms_wrapper import nms, soft_nms
+
+__all__ = ['nms', 'soft_nms']
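A hedged usage sketch for the exported wrappers (the argument names in this mmdet revision are assumptions, so the calls are left commented):

import numpy as np

# Detections as [x1, y1, x2, y2, score]; NMS suppresses boxes that overlap
# a higher-scoring box beyond the IoU threshold, while soft-NMS decays
# their scores instead of dropping them.
dets = np.array([[10, 10, 50, 50, 0.9],
                 [12, 12, 52, 52, 0.8],      # near-duplicate of box 0
                 [100, 100, 150, 150, 0.7]], dtype=np.float32)
# from mmdet.ops import nms, soft_nms
# keep = nms(dets, 0.5)           # indices of surviving boxes
# new_dets = soft_nms(dets, 0.5)  # rescored detections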
 from .functions.roi_align import roi_align
 from .modules.roi_align import RoIAlign
+
+__all__ = ['roi_align', 'RoIAlign']
......
@@ -5,7 +5,7 @@ from torch.autograd import gradcheck
 import os.path as osp
 import sys
 sys.path.append(osp.abspath(osp.join(__file__, '../../')))
-from roi_align import RoIAlign
+from roi_align import RoIAlign  # noqa: E402

 feat_size = 15
 spatial_scale = 1.0 / 8
......
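The `# noqa: E402` is needed because the import must follow the sys.path edit: E402 flags module-level imports that come after executable code, and here that ordering is deliberate, so the commit suppresses rather than reorders. These scripts drive torch.autograd.gradcheck against the CUDA ops; a hedged miniature of the gradcheck pattern on a differentiable builtin:

import torch
from torch.autograd import gradcheck

# gradcheck compares analytic gradients against finite differences; it
# needs double precision to keep numerical error inside its tolerances.
x = torch.randn(4, 4, dtype=torch.double, requires_grad=True)
ok = gradcheck(lambda t: (t * t).sum(), (x,), eps=1e-6, atol=1e-4)
print(ok)  # True when analytic and numeric gradients agree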
 from .functions.roi_pool import roi_pool
 from .modules.roi_pool import RoIPool
+
+__all__ = ['roi_pool', 'RoIPool']
......
@@ -4,7 +4,7 @@ from torch.autograd import gradcheck
 import os.path as osp
 import sys
 sys.path.append(osp.abspath(osp.join(__file__, '../../')))
-from roi_pool import RoIPool
+from roi_pool import RoIPool  # noqa: E402

 feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda()
 rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55],
......
......
@@ -61,7 +61,7 @@ def get_hash():
 def write_version_py():
-    content = """# GENERATED VERSION FILE
+    content = """# GENERATED VERSION FILE
 # TIME: {}

 __version__ = '{}'
......
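The template in this hunk is how setup.py materializes a generated version module at build time, which the `from .version import __version__, short_version` line at the top of this commit then imports. A hedged sketch of the pattern (the path and the fields beyond what the hunk shows are assumptions):

import time


def write_version_py_sketch(version='0.5.0', path='mmdet/version.py'):
    # Write a generated module so the installed package can report its
    # own version and build time.
    content = ("# GENERATED VERSION FILE\n"
               "# TIME: {}\n\n"
               "__version__ = '{}'\n"
               "short_version = '{}'\n").format(time.asctime(), version,
                                                version)
    with open(path, 'w') as f:
        f.write(content)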