Commit 7eb02d29 authored by Kai Chen's avatar Kai Chen
Browse files

Merge branch 'dev' into single-stage

parents 20e75c22 01a03aab
from .segms import (flip_segms, polys_to_mask, mask_to_bbox,
polys_to_mask_wrt_box, polys_to_boxes, rle_mask_voting,
rle_mask_nms, rle_masks_to_boxes)
from .utils import split_combined_polys
from .mask_target import mask_target
# Public API of the mask-ops subpackage.
__all__ = [
    'flip_segms', 'polys_to_mask', 'mask_to_bbox', 'polys_to_mask_wrt_box',
    'polys_to_boxes', 'rle_mask_voting', 'rle_mask_nms', 'rle_masks_to_boxes',
    'split_combined_polys', 'mask_target'
]
# flake8: noqa
# This file is copied from Detectron.
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Functions for interacting with segmentation masks in the COCO format.
The following terms are used in this module
mask: a binary mask encoded as a 2D numpy array
segm: a segmentation mask in one of the two COCO formats (polygon or RLE)
polygon: COCO's polygon format
RLE: COCO's run length encoding format
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pycocotools.mask as mask_util
def flip_segms(segms, height, width):
    """Left/right flip each mask in a list of masks.

    Args:
        segms: list of segmentations; each element is either a list of
            polygons (COCO polygon format) or an RLE dict (COCO RLE format).
        height (int): image height the masks live in.
        width (int): image width the masks live in.

    Returns:
        list: flipped segmentations, same order and formats as the input.
    """

    def _flip_poly(poly, width):
        flipped_poly = np.array(poly)
        # Mirror x coordinates: x -> width - x - 1 (pixel-index convention).
        flipped_poly[0::2] = width - np.array(poly[0::2]) - 1
        return flipped_poly.tolist()

    def _flip_rle(rle, height, width):
        if 'counts' in rle and isinstance(rle['counts'], list):
            # Magic RLE format handling painfully discovered by looking at the
            # COCO API showAnns function.
            rle = mask_util.frPyObjects([rle], height, width)
        mask = mask_util.decode(rle)
        # Flip along the width axis, then re-encode (Fortran order required).
        mask = mask[:, ::-1, :]
        rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
        return rle

    flipped_segms = []
    for segm in segms:
        # isinstance instead of type()==: also accepts list/dict subclasses.
        if isinstance(segm, list):
            # Polygon format
            flipped_segms.append([_flip_poly(poly, width) for poly in segm])
        else:
            # RLE format
            assert isinstance(segm, dict)
            flipped_segms.append(_flip_rle(segm, height, width))
    return flipped_segms
def polys_to_mask(polygons, height, width):
    """Rasterize COCO-format polygons into a single binary mask.

    The polygons are assumed to lie inside a ``height x width`` image; the
    result is a float32 array of shape ``(height, width)`` with 1.0 on
    covered pixels and 0.0 elsewhere.
    """
    rles = mask_util.frPyObjects(polygons, height, width)
    decoded = mask_util.decode(rles).astype(np.float32)
    # Collapse the per-polygon channel axis and binarize the union.
    merged = decoded.sum(axis=2)
    return (merged > 0).astype(np.float32)
def mask_to_bbox(mask):
    """Compute the tight bounding box of a binary mask.

    Returns a float32 array (x0, y0, x1, y1) of inclusive pixel indices,
    or None when the mask has no foreground pixels.
    """
    col_hits = np.flatnonzero(mask.sum(axis=0) > 0)
    row_hits = np.flatnonzero(mask.sum(axis=1) > 0)
    if col_hits.size == 0 or row_hits.size == 0:
        # No foreground at all: there is no valid box.
        return None
    return np.array(
        (col_hits[0], row_hits[0], col_hits[-1], row_hits[-1]),
        dtype=np.float32)
def polys_to_mask_wrt_box(polygons, box, M):
    """Rasterize COCO polygons into an M x M binary mask relative to `box`.

    Polygon coordinates are translated to the box origin and rescaled so
    the box maps onto an M x M grid; the result is a float32 array of
    shape (M, M) with 1.0 on covered pixels.
    """
    # Guard against degenerate boxes (zero width or height).
    box_w = np.maximum(box[2] - box[0], 1)
    box_h = np.maximum(box[3] - box[1], 1)

    scaled_polys = []
    for poly in polygons:
        pts = np.array(poly, dtype=np.float32)
        pts[0::2] = (pts[0::2] - box[0]) * M / box_w
        pts[1::2] = (pts[1::2] - box[1]) * M / box_h
        scaled_polys.append(pts)

    rles = mask_util.frPyObjects(scaled_polys, M, M)
    decoded = mask_util.decode(rles).astype(np.float32)
    # Collapse the per-polygon channel axis and binarize the union.
    merged = decoded.sum(axis=2)
    return (merged > 0).astype(np.float32)
def polys_to_boxes(polys):
    """Convert a list of polygon lists into an (N, 4) array of tight
    (x0, y0, x1, y1) bounding boxes, one per entry of `polys`."""
    boxes = np.zeros((len(polys), 4), dtype=np.float32)
    for idx, poly in enumerate(polys):
        # Gather every x and y coordinate across all parts of this mask.
        all_x = [coord for part in poly for coord in part[::2]]
        all_y = [coord for part in poly for coord in part[1::2]]
        boxes[idx, :] = [min(all_x), min(all_y), max(all_x), max(all_y)]
    return boxes
def rle_mask_voting(top_masks,
                    all_masks,
                    all_dets,
                    iou_thresh,
                    binarize_thresh,
                    method='AVG'):
    """Returns new masks (in correspondence with `top_masks`) by combining
    multiple overlapping masks coming from the pool of `all_masks`. Two methods
    for combining masks are supported: 'AVG' uses a weighted average of
    overlapping mask pixels; 'UNION' takes the union of all mask pixels.

    Args:
        top_masks: RLE-encoded masks to refine.
        all_masks: pool of RLE-encoded candidate masks from the same image.
        all_dets: detections array aligned with `all_masks`; columns 0-3 are
            boxes, column 4 is the score used to weight each mask's vote.
        iou_thresh: candidates with IoU >= this threshold join the vote.
        binarize_thresh: binarization threshold for the 'AVG' soft mask.
        method: 'AVG' or 'UNION'.

    Returns:
        List of RLE masks, one per entry of `top_masks`.
        NOTE(review): returns None (bare `return`) when `top_masks` is empty,
        not an empty list — confirm callers handle this.
    """
    if len(top_masks) == 0:
        return

    # Pairwise IoU between the masks to refine and the candidate pool.
    all_not_crowd = [False] * len(all_masks)
    top_to_all_overlaps = mask_util.iou(top_masks, all_masks, all_not_crowd)
    decoded_all_masks = [
        np.array(mask_util.decode(rle), dtype=np.float32) for rle in all_masks
    ]
    decoded_top_masks = [
        np.array(mask_util.decode(rle), dtype=np.float32) for rle in top_masks
    ]
    all_boxes = all_dets[:, :4].astype(np.int32)
    all_scores = all_dets[:, 4]

    # Fill box support with weights
    mask_shape = decoded_all_masks[0].shape
    mask_weights = np.zeros((len(all_masks), mask_shape[0], mask_shape[1]))
    for k in range(len(all_masks)):
        ref_box = all_boxes[k]
        # Clip each detection box to the mask canvas before painting weights.
        x_0 = max(ref_box[0], 0)
        x_1 = min(ref_box[2] + 1, mask_shape[1])
        y_0 = max(ref_box[1], 0)
        y_1 = min(ref_box[3] + 1, mask_shape[0])
        mask_weights[k, y_0:y_1, x_0:x_1] = all_scores[k]
    # Floor the weights so np.average never divides by an all-zero sum.
    mask_weights = np.maximum(mask_weights, 1e-5)

    top_segms_out = []
    for k in range(len(top_masks)):
        # Corner case of empty mask
        if decoded_top_masks[k].sum() == 0:
            top_segms_out.append(top_masks[k])
            continue

        inds_to_vote = np.where(top_to_all_overlaps[k] >= iou_thresh)[0]
        # Only matches itself
        if len(inds_to_vote) == 1:
            top_segms_out.append(top_masks[k])
            continue

        masks_to_vote = [decoded_all_masks[i] for i in inds_to_vote]
        if method == 'AVG':
            # Score-weighted per-pixel average, then binarize.
            ws = mask_weights[inds_to_vote]
            soft_mask = np.average(masks_to_vote, axis=0, weights=ws)
            mask = np.array(soft_mask > binarize_thresh, dtype=np.uint8)
        elif method == 'UNION':
            # Any pixel that's on joins the mask
            soft_mask = np.sum(masks_to_vote, axis=0)
            mask = np.array(soft_mask > 1e-5, dtype=np.uint8)
        else:
            raise NotImplementedError('Method {} is unknown'.format(method))
        # Re-encode the combined binary mask as RLE (Fortran order required).
        rle = mask_util.encode(np.array(mask[:, :, np.newaxis], order='F'))[0]
        top_segms_out.append(rle)

    return top_segms_out
def rle_mask_nms(masks, dets, thresh, mode='IOU'):
    """Performs greedy non-maximum suppression based on an overlap measurement
    between masks. The type of measurement is determined by `mode` and can be
    'IOU' (standard intersection over union), 'IOMA' (intersection over
    minimum area), or 'CONTAINMENT' (intersection over the second mask's
    area).

    Args:
        masks: list of RLE-encoded masks.
        dets: detections array aligned with `masks`; column 4 is the score.
        thresh: overlap threshold above which a lower-scoring mask is dropped.
        mode: 'IOU', 'IOMA', or 'CONTAINMENT'.

    Returns:
        Indices of the kept masks, in decreasing score order.
    """
    if len(masks) == 0:
        return []
    if len(masks) == 1:
        return [0]

    if mode == 'IOU':
        # Computes ious[m1, m2] = area(intersect(m1, m2)) / area(union(m1, m2))
        all_not_crowds = [False] * len(masks)
        ious = mask_util.iou(masks, masks, all_not_crowds)
    elif mode == 'IOMA':
        # Computes ious[m1, m2] = area(intersect(m1, m2)) / min(area(m1), area(m2))
        all_crowds = [True] * len(masks)
        # ious[m1, m2] = area(intersect(m1, m2)) / area(m2)
        ious = mask_util.iou(masks, masks, all_crowds)
        # ... = max(area(intersect(m1, m2)) / area(m2),
        #           area(intersect(m2, m1)) / area(m1))
        ious = np.maximum(ious, ious.transpose())
    elif mode == 'CONTAINMENT':
        # Computes ious[m1, m2] = area(intersect(m1, m2)) / area(m2)
        # Which measures how much m2 is contained inside m1
        all_crowds = [True] * len(masks)
        ious = mask_util.iou(masks, masks, all_crowds)
    else:
        raise NotImplementedError('Mode {} is unknown'.format(mode))

    # Greedy NMS: keep the highest-scoring mask, discard everything that
    # overlaps it more than `thresh`, and repeat on the remainder.
    scores = dets[:, 4]
    order = np.argsort(-scores)

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        ovr = ious[i, order[1:]]
        inds_to_keep = np.where(ovr <= thresh)[0]
        # +1 because `ovr` was computed over order[1:].
        order = order[inds_to_keep + 1]

    return keep
def rle_masks_to_boxes(masks):
    """Computes the bounding box of each mask in a list of RLE encoded masks.

    Returns:
        A tuple ``(boxes, keep)`` where ``boxes`` is an (N, 4) array of
        (x0, y0, x1, y1) boxes and ``keep`` holds the indices of masks with
        at least one foreground pixel.
        NOTE(review): when `masks` is empty a bare list ``[]`` is returned
        instead of a tuple — confirm callers handle this inconsistency.
    """
    if len(masks) == 0:
        return []

    decoded_masks = [
        np.array(mask_util.decode(rle), dtype=np.float32) for rle in masks
    ]

    def get_bounds(flat_mask):
        # First and last index with any foreground along the projected axis.
        inds = np.where(flat_mask > 0)[0]
        return inds.min(), inds.max()

    boxes = np.zeros((len(decoded_masks), 4))
    keep = [True] * len(decoded_masks)
    for i, mask in enumerate(decoded_masks):
        if mask.sum() == 0:
            # Empty mask: no valid box; excluded via the `keep` indices.
            keep[i] = False
            continue
        # Project onto x (columns), then y (rows), to find the extents.
        flat_mask = mask.sum(axis=0)
        x0, x1 = get_bounds(flat_mask)
        flat_mask = mask.sum(axis=1)
        y0, y1 = get_bounds(flat_mask)
        boxes[i, :] = (x0, y0, x1, y1)

    return boxes, np.where(keep)[0]
from .data_parallel import MMDataParallel
from .distributed import MMDistributedDataParallel
from .scatter_gather import scatter, scatter_kwargs
# Public API of the parallel subpackage.
__all__ = [
    'MMDataParallel', 'MMDistributedDataParallel', 'scatter', 'scatter_kwargs'
]
import torch
from torch.nn.parallel._functions import _get_stream
def scatter(input, devices, streams=None):
    """Scatters tensor across multiple GPUs.

    Args:
        input: a tensor or a (possibly nested) list of tensors.
        devices: target GPU ids; list items are assigned contiguously in
            chunks of ``ceil(len(input) / len(devices))`` per device.
        streams: optional CUDA streams (one per device) to enqueue the
            copies on; None means the current stream.

    Returns:
        The scattered structure mirroring `input`, with tensors moved to
        their assigned devices.
    """
    if streams is None:
        streams = [None] * len(devices)

    if isinstance(input, list):
        # Each chunk of `chunk_size` consecutive items goes to one device.
        chunk_size = (len(input) - 1) // len(devices) + 1
        outputs = [
            scatter(input[i], [devices[i // chunk_size]],
                    [streams[i // chunk_size]]) for i in range(len(input))
        ]
        return outputs
    elif isinstance(input, torch.Tensor):
        output = input.contiguous()
        # TODO: copy to a pinned buffer first (if copying from CPU)
        # Empty tensors skip the background stream (nothing to overlap).
        stream = streams[0] if output.numel() > 0 else None
        with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
            output = output.cuda(devices[0], non_blocking=True)
        return output
    else:
        raise Exception('Unknown type {}.'.format(type(input)))
def synchronize_stream(output, devices, streams):
    """Make each device's current stream wait for the copy stream used by
    :func:`scatter`, so the scattered tensors are safe to consume.

    `output`, `devices` and `streams` mirror the structure produced/used by
    :func:`scatter` (nested lists chunked per device).
    """
    if isinstance(output, list):
        chunk_size = len(output) // len(devices)
        for i in range(len(devices)):
            for j in range(chunk_size):
                synchronize_stream(output[i * chunk_size + j], [devices[i]],
                                   [streams[i]])
    elif isinstance(output, torch.Tensor):
        if output.numel() != 0:
            with torch.cuda.device(devices[0]):
                main_stream = torch.cuda.current_stream()
                main_stream.wait_stream(streams[0])
                # Tie the tensor to the main stream so its memory is not
                # reclaimed/reused while the async copy may still be running.
                output.record_stream(main_stream)
    else:
        raise Exception('Unknown type {}.'.format(type(output)))
def get_input_device(input):
    """Return the CUDA device index holding `input`, or -1 for CPU data.

    `input` may be a tensor or an arbitrarily nested list of tensors; the
    first CUDA device encountered wins.
    """
    if isinstance(input, torch.Tensor):
        return input.get_device() if input.is_cuda else -1
    if isinstance(input, list):
        for element in input:
            device = get_input_device(element)
            if device != -1:
                return device
        return -1
    raise Exception('Unknown type {}.'.format(type(input)))
class Scatter(object):
    """Scatter helper that stages CPU-to-GPU copies on background streams."""

    @staticmethod
    def forward(target_gpus, input):
        """Scatter `input` to `target_gpus` and synchronize before returning."""
        streams = None
        if get_input_device(input) == -1:
            # Input lives on CPU: perform the copies on background streams.
            streams = [_get_stream(device) for device in target_gpus]

        outputs = scatter(input, target_gpus, streams)
        if streams is not None:
            # Block the main stream until the background copies complete.
            synchronize_stream(outputs, target_gpus, streams)

        return tuple(outputs)
from torch.nn.parallel import DataParallel
from .scatter_gather import scatter_kwargs
class MMDataParallel(DataParallel):
    """DataParallel variant that scatters inputs via the local
    `scatter_kwargs` (see `.scatter_gather`), which adds DataContainer
    support on top of torch's default scattering."""

    def scatter(self, inputs, kwargs, device_ids):
        # Delegate to the DataContainer-aware scatter implementation.
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
import torch
import torch.distributed as dist
import torch.nn as nn
from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors,
_take_tensors)
from .scatter_gather import scatter_kwargs
class MMDistributedDataParallel(nn.Module):
    """Distributed data-parallel wrapper.

    Synchronizes parameters (and optionally buffers) from rank 0 at
    construction and scatters inputs to the current device on forward.
    Gradient all-reduce is NOT performed here — it is expected to happen
    elsewhere (e.g. in an optimizer hook).

    Args:
        module: the model to wrap.
        dim: scatter dimension passed through to `scatter_kwargs`.
        broadcast_buffers: also broadcast module buffers from rank 0.
    """

    def __init__(self, module, dim=0, broadcast_buffers=True):
        super(MMDistributedDataParallel, self).__init__()
        self.module = module
        self.dim = dim
        self.broadcast_buffers = broadcast_buffers
        # Coalesce broadcasts into flat buffers of up to 32 MB each.
        self.broadcast_bucket_size = 32 * 1024 * 1024
        self._sync_params()

    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        """Broadcast `tensors` from rank 0, coalescing them into flat
        buffers of at most `buffer_size` bytes to cut launch overhead.

        (The loop variable previously shadowed the `tensors` parameter;
        renamed to `bucket` — behavior unchanged.)
        """
        for bucket in _take_tensors(tensors, buffer_size):
            flat_tensors = _flatten_dense_tensors(bucket)
            dist.broadcast(flat_tensors, 0)
            for tensor, synced in zip(
                    bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
                tensor.copy_(synced)

    def _sync_params(self):
        """Broadcast parameters (and optionally buffers) from rank 0."""
        module_states = list(self.module.state_dict().values())
        if len(module_states) > 0:
            self._dist_broadcast_coalesced(module_states,
                                           self.broadcast_bucket_size)
        if self.broadcast_buffers:
            # NOTE(review): Module._all_buffers() is a private torch API
            # removed in newer releases (use .buffers()); confirm the
            # pinned torch version still provides it.
            buffers = [b.data for b in self.module._all_buffers()]
            if len(buffers) > 0:
                self._dist_broadcast_coalesced(buffers,
                                               self.broadcast_bucket_size)

    def scatter(self, inputs, kwargs, device_ids):
        # DataContainer-aware scattering (see .scatter_gather).
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        # One process per GPU: scatter to the current device only.
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])
import torch
from torch.nn.parallel._functions import Scatter as OrigScatter
from ._functions import Scatter
from mmdet.datasets.utils import DataContainer
def scatter(inputs, target_gpus, dim=0):
    """Scatter inputs to target gpus.

    The only difference from original :func:`scatter` is to add support for
    :type:`~mmdet.DataContainer`.

    Tensors are scattered with torch's Scatter; DataContainers either stay
    on CPU (`cpu_only`) or go through the stream-aware Scatter helper;
    tuples/lists/dicts are scattered element-wise and transposed so the
    result is one structure per GPU.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return OrigScatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                # CPU-only containers (e.g. meta info) are never moved to GPU.
                return obj.data
            else:
                return Scatter.forward(target_gpus, obj.data)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if isinstance(obj, dict) and len(obj) > 0:
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out
        # Non-container leaf: replicate the same object once per GPU.
        return [obj for targets in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
    """Scatter positional and keyword arguments, padding the shorter side
    so both sequences have one entry per GPU."""
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []

    # Align lengths: pad with empty tuples/dicts as needed.
    size_diff = len(scattered_kwargs) - len(scattered_inputs)
    if size_diff > 0:
        scattered_inputs.extend(() for _ in range(size_diff))
    elif size_diff < 0:
        scattered_kwargs.extend({} for _ in range(-size_diff))

    return tuple(scattered_inputs), tuple(scattered_kwargs)
...@@ -3,7 +3,7 @@ import torch ...@@ -3,7 +3,7 @@ import torch
import numpy as np import numpy as np
from mmdet.ops import nms from mmdet.ops import nms
from ..bbox_ops import bbox_mapping_back from ..bbox import bbox_mapping_back
def merge_aug_proposals(aug_proposals, img_metas, rpn_test_cfg): def merge_aug_proposals(aug_proposals, img_metas, rpn_test_cfg):
......
from .dist_utils import (init_dist, reduce_grads, DistOptimizerHook, from .dist_utils import init_dist, allreduce_grads, DistOptimizerHook
DistSamplerSeedHook)
from .misc import tensor2imgs, unmap, multi_apply from .misc import tensor2imgs, unmap, multi_apply
__all__ = [ __all__ = [
'init_dist', 'reduce_grads', 'DistOptimizerHook', 'DistSamplerSeedHook', 'init_dist', 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs',
'tensor2imgs', 'unmap', 'multi_apply' 'unmap', 'multi_apply'
] ]
...@@ -4,9 +4,9 @@ from collections import OrderedDict ...@@ -4,9 +4,9 @@ from collections import OrderedDict
import torch import torch
import torch.multiprocessing as mp import torch.multiprocessing as mp
import torch.distributed as dist import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors,
from torch.nn.utils import clip_grad _take_tensors)
from mmcv.runner import Hook, OptimizerHook from mmcv.runner import OptimizerHook
def init_dist(launcher, backend='nccl', **kwargs): def init_dist(launcher, backend='nccl', **kwargs):
...@@ -38,59 +38,52 @@ def _init_dist_slurm(backend, **kwargs): ...@@ -38,59 +38,52 @@ def _init_dist_slurm(backend, **kwargs):
raise NotImplementedError raise NotImplementedError
# modified from def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
# https://github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py#L9 if bucket_size_mb > 0:
def all_reduce_coalesced(tensors): bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = OrderedDict() buckets = _take_tensors(tensors, bucket_size_bytes)
for tensor in tensors: else:
tp = tensor.type() buckets = OrderedDict()
if tp not in buckets: for tensor in tensors:
buckets[tp] = [] tp = tensor.type()
buckets[tp].append(tensor) if tp not in buckets:
buckets[tp] = []
world_size = dist.get_world_size() buckets[tp].append(tensor)
for tp in buckets: buckets = buckets.values()
bucket = buckets[tp]
coalesced = _flatten_dense_tensors(bucket) for bucket in buckets:
dist.all_reduce(coalesced) flat_tensors = _flatten_dense_tensors(bucket)
coalesced.div_(world_size) dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for buf, synced in zip(bucket, for tensor, synced in zip(
_unflatten_dense_tensors(coalesced, bucket)): bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
buf.copy_(synced) tensor.copy_(synced)
def reduce_grads(model, coalesce=True): def allreduce_grads(model, coalesce=True, bucket_size_mb=-1):
grads = [ grads = [
param.grad.data for param in model.parameters() param.grad.data for param in model.parameters()
if param.requires_grad and param.grad is not None if param.requires_grad and param.grad is not None
] ]
world_size = dist.get_world_size()
if coalesce: if coalesce:
all_reduce_coalesced(grads) _allreduce_coalesced(grads, world_size, bucket_size_mb)
else: else:
world_size = dist.get_world_size()
for tensor in grads: for tensor in grads:
dist.all_reduce(tensor.div_(world_size)) dist.all_reduce(tensor.div_(world_size))
class DistOptimizerHook(OptimizerHook): class DistOptimizerHook(OptimizerHook):
def __init__(self, grad_clip=None, coalesce=True): def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
self.grad_clip = grad_clip self.grad_clip = grad_clip
self.coalesce = coalesce self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
def after_train_iter(self, runner): def after_train_iter(self, runner):
runner.optimizer.zero_grad() runner.optimizer.zero_grad()
runner.outputs['loss'].backward() runner.outputs['loss'].backward()
reduce_grads(runner.model, self.coalesce) allreduce_grads(runner.model, self.coalesce, self.bucket_size_mb)
if self.grad_clip is not None: if self.grad_clip is not None:
clip_grad.clip_grad_norm_( self.clip_grads(runner.model.parameters())
filter(lambda p: p.requires_grad, runner.model.parameters()),
**self.grad_clip)
runner.optimizer.step() runner.optimizer.step()
class DistSamplerSeedHook(Hook):
def before_epoch(self, runner):
runner.data_loader.sampler.set_epoch(runner.epoch)
from .coco import CocoDataset from .coco import CocoDataset
from .loader import (collate, GroupSampler, DistributedGroupSampler, from .loader import GroupSampler, DistributedGroupSampler, build_dataloader
build_dataloader) from .utils import to_tensor, random_scale, show_ann
from .utils import DataContainer, to_tensor, random_scale, show_ann
__all__ = [ __all__ = [
'CocoDataset', 'collate', 'GroupSampler', 'DistributedGroupSampler', 'CocoDataset', 'GroupSampler', 'DistributedGroupSampler',
'build_dataloader', 'DataContainer', 'to_tensor', 'random_scale', 'build_dataloader', 'to_tensor', 'random_scale', 'show_ann'
'show_ann'
] ]
...@@ -2,13 +2,13 @@ import os.path as osp ...@@ -2,13 +2,13 @@ import os.path as osp
import mmcv import mmcv
import numpy as np import numpy as np
from mmcv.parallel import DataContainer as DC
from pycocotools.coco import COCO from pycocotools.coco import COCO
from torch.utils.data import Dataset from torch.utils.data import Dataset
from .transforms import (ImageTransform, BboxTransform, MaskTransform, from .transforms import (ImageTransform, BboxTransform, MaskTransform,
Numpy2Tensor) Numpy2Tensor)
from .utils import to_tensor, show_ann, random_scale from .utils import to_tensor, show_ann, random_scale
from .utils import DataContainer as DC
class CocoDataset(Dataset): class CocoDataset(Dataset):
......
from .build_loader import build_dataloader from .build_loader import build_dataloader
from .collate import collate
from .sampler import GroupSampler, DistributedGroupSampler from .sampler import GroupSampler, DistributedGroupSampler
__all__ = [ __all__ = [
'collate', 'GroupSampler', 'DistributedGroupSampler', 'build_dataloader' 'GroupSampler', 'DistributedGroupSampler', 'build_dataloader'
] ]
from functools import partial from functools import partial
from mmcv.runner import get_dist_info from mmcv.runner import get_dist_info
from mmcv.parallel import collate
from torch.utils.data import DataLoader from torch.utils.data import DataLoader
from .collate import collate
from .sampler import GroupSampler, DistributedGroupSampler from .sampler import GroupSampler, DistributedGroupSampler
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def build_dataloader(dataset, def build_dataloader(dataset,
imgs_per_gpu, imgs_per_gpu,
......
import collections
import torch
import torch.nn.functional as F
from torch.utils.data.dataloader import default_collate
from ..utils import DataContainer
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def collate(batch, samples_per_gpu=1):
"""Puts each data field into a tensor/DataContainer with outer dimension
batch size.
Extend default_collate to add support for :type:`~mmdet.DataContainer`.
There are 3 cases for data containers.
1. cpu_only = True, e.g., meta data
2. cpu_only = False, stack = True, e.g., images tensors
3. cpu_only = False, stack = False, e.g., gt bboxes
"""
if not isinstance(batch, collections.Sequence):
raise TypeError("{} is not supported.".format(batch.dtype))
if isinstance(batch[0], DataContainer):
assert len(batch) % samples_per_gpu == 0
stacked = []
if batch[0].cpu_only:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(
stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
elif batch[0].stack:
for i in range(0, len(batch), samples_per_gpu):
assert isinstance(batch[i].data, torch.Tensor)
# TODO: handle tensors other than 3d
assert batch[i].dim() == 3
c, h, w = batch[0].size()
for sample in batch[i:i + samples_per_gpu]:
assert c == sample.size(0)
h = max(h, sample.size(1))
w = max(w, sample.size(2))
padded_samples = [
F.pad(
sample.data,
(0, w - sample.size(2), 0, h - sample.size(1)),
value=sample.padding_value)
for sample in batch[i:i + samples_per_gpu]
]
stacked.append(default_collate(padded_samples))
else:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [collate(samples, samples_per_gpu) for samples in transposed]
elif isinstance(batch[0], collections.Mapping):
return {
key: collate([d[key] for d in batch], samples_per_gpu)
for key in batch[0]
}
else:
return default_collate(batch)
...@@ -2,11 +2,7 @@ import mmcv ...@@ -2,11 +2,7 @@ import mmcv
import numpy as np import numpy as np
import torch import torch
from mmdet.core.mask_ops import segms __all__ = ['ImageTransform', 'BboxTransform', 'MaskTransform', 'Numpy2Tensor']
__all__ = [
'ImageTransform', 'BboxTransform', 'PolyMaskTransform', 'Numpy2Tensor'
]
class ImageTransform(object): class ImageTransform(object):
...@@ -85,26 +81,6 @@ class BboxTransform(object): ...@@ -85,26 +81,6 @@ class BboxTransform(object):
return padded_bboxes return padded_bboxes
class PolyMaskTransform(object):
"""Preprocess polygons."""
def __init__(self):
pass
def __call__(self, gt_mask_polys, gt_poly_lens, img_h, img_w, flip=False):
if flip:
gt_mask_polys = segms.flip_segms(gt_mask_polys, img_h, img_w)
num_polys_per_mask = np.array(
[len(mask_polys) for mask_polys in gt_mask_polys], dtype=np.int64)
gt_poly_lens = np.array(gt_poly_lens, dtype=np.int64)
gt_mask_polys = [
np.concatenate(mask_polys).astype(np.float32)
for mask_polys in gt_mask_polys
]
gt_mask_polys = np.concatenate(gt_mask_polys)
return gt_mask_polys, gt_poly_lens, num_polys_per_mask
class MaskTransform(object): class MaskTransform(object):
"""Preprocess masks. """Preprocess masks.
......
...@@ -5,7 +5,6 @@ import torch ...@@ -5,7 +5,6 @@ import torch
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import numpy as np import numpy as np
import pycocotools.mask as maskUtils
def to_tensor(data): def to_tensor(data):
...@@ -68,19 +67,3 @@ def show_ann(coco, img, ann_info): ...@@ -68,19 +67,3 @@ def show_ann(coco, img, ann_info):
plt.axis('off') plt.axis('off')
coco.showAnns(ann_info) coco.showAnns(ann_info)
plt.show() plt.show()
def draw_bbox_and_segm(img, results, dataset, score_thr=0.5):
bbox_results, segm_results = results
hi_bboxes = []
for cls_bboxes, cls_segms in zip(bbox_results, segm_results):
if len(cls_bboxes) == 0:
hi_bboxes.append(cls_bboxes)
continue
inds = np.where(cls_bboxes[:, -1] > score_thr)[0]
hi_bboxes.append(cls_bboxes[inds, :])
color_mask = np.random.random((1, 3))
for i in inds:
mask = maskUtils.decode(cls_segms[i]).astype(np.bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
mmcv.draw_bboxes_with_label(np.ascontiguousarray(img), hi_bboxes, dataset)
from .data_container import DataContainer
from .misc import to_tensor, random_scale, show_ann
# Public API of the datasets.utils subpackage.
__all__ = ['DataContainer', 'to_tensor', 'random_scale', 'show_ann']
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment