Commit 82356fd9 authored by Kai Chen

support chunk when reducing grads

parent 3d2b79bd
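What "chunk" means here: with a positive bucket_size_mb, gradients are grouped into size-limited buckets via torch._utils._take_tensors before each all_reduce, instead of one bucket per tensor type. A small self-contained sketch of the two strategies (not part of the commit; the tensor shapes and the 8 MB limit are arbitrary illustration values):

# Standalone illustration of the bucketing used by _allreduce_coalesced below.
import torch
from torch._utils import _take_tensors

grads = [torch.randn(1024, 1024) for _ in range(8)]  # eight ~4 MB fp32 tensors

# chunked path (bucket_size_mb > 0): buckets capped at ~8 MB each
chunks = list(_take_tensors(grads, 8 * 1024 * 1024))
print([len(c) for c in chunks])  # e.g. [2, 2, 2, 2]

# default path (bucket_size_mb <= 0): one bucket per tensor type
buckets = {}
for g in grads:
    buckets.setdefault(g.type(), []).append(g)
print([len(b) for b in buckets.values()])  # [8] -> a single ~32 MB bucket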
__init__.py
-from .dist_utils import (init_dist, reduce_grads, DistOptimizerHook,
-                         DistSamplerSeedHook)
+from .dist_utils import init_dist, allreduce_grads, DistOptimizerHook
 from .misc import tensor2imgs, unmap, multi_apply
 
 __all__ = [
-    'init_dist', 'reduce_grads', 'DistOptimizerHook', 'DistSamplerSeedHook',
-    'tensor2imgs', 'unmap', 'multi_apply'
+    'init_dist', 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs',
+    'unmap', 'multi_apply'
 ]
dist_utils.py
@@ -4,9 +4,9 @@ from collections import OrderedDict
 import torch
 import torch.multiprocessing as mp
 import torch.distributed as dist
-from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
-from torch.nn.utils import clip_grad
+from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors,
+                          _take_tensors)
 
-from mmcv.runner import Hook, OptimizerHook
+from mmcv.runner import OptimizerHook
 
 
 def init_dist(launcher, backend='nccl', **kwargs):
@@ -38,59 +38,52 @@ def _init_dist_slurm(backend, **kwargs):
     raise NotImplementedError
 
 
-# modified from
-# https://github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py#L9
-def all_reduce_coalesced(tensors):
-    buckets = OrderedDict()
-    for tensor in tensors:
-        tp = tensor.type()
-        if tp not in buckets:
-            buckets[tp] = []
-        buckets[tp].append(tensor)
-
-    world_size = dist.get_world_size()
-    for tp in buckets:
-        bucket = buckets[tp]
-        coalesced = _flatten_dense_tensors(bucket)
-        dist.all_reduce(coalesced)
-        coalesced.div_(world_size)
-
-        for buf, synced in zip(bucket,
-                               _unflatten_dense_tensors(coalesced, bucket)):
-            buf.copy_(synced)
-
-
-def reduce_grads(model, coalesce=True):
+def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
+    if bucket_size_mb > 0:
+        bucket_size_bytes = bucket_size_mb * 1024 * 1024
+        buckets = _take_tensors(tensors, bucket_size_bytes)
+    else:
+        buckets = OrderedDict()
+        for tensor in tensors:
+            tp = tensor.type()
+            if tp not in buckets:
+                buckets[tp] = []
+            buckets[tp].append(tensor)
+        buckets = buckets.values()
+
+    for bucket in buckets:
+        flat_tensors = _flatten_dense_tensors(bucket)
+        dist.all_reduce(flat_tensors)
+        flat_tensors.div_(world_size)
+        for tensor, synced in zip(
+                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
+            tensor.copy_(synced)
+
+
+def allreduce_grads(model, coalesce=True, bucket_size_mb=-1):
     grads = [
         param.grad.data for param in model.parameters()
        if param.requires_grad and param.grad is not None
     ]
+    world_size = dist.get_world_size()
     if coalesce:
-        all_reduce_coalesced(grads)
+        _allreduce_coalesced(grads, world_size, bucket_size_mb)
     else:
-        world_size = dist.get_world_size()
         for tensor in grads:
             dist.all_reduce(tensor.div_(world_size))
 
 
 class DistOptimizerHook(OptimizerHook):
 
-    def __init__(self, grad_clip=None, coalesce=True):
+    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
         self.grad_clip = grad_clip
         self.coalesce = coalesce
+        self.bucket_size_mb = bucket_size_mb
 
     def after_train_iter(self, runner):
         runner.optimizer.zero_grad()
         runner.outputs['loss'].backward()
-        reduce_grads(runner.model, self.coalesce)
+        allreduce_grads(runner.model, self.coalesce, self.bucket_size_mb)
         if self.grad_clip is not None:
-            clip_grad.clip_grad_norm_(
-                filter(lambda p: p.requires_grad, runner.model.parameters()),
-                **self.grad_clip)
+            self.clip_grads(runner.model.parameters())
         runner.optimizer.step()
-
-
-class DistSamplerSeedHook(Hook):
-
-    def before_epoch(self, runner):
-        runner.data_loader.sampler.set_epoch(runner.epoch)
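A hedged usage sketch of the new option (assumptions: the process group has already been initialized, e.g. via init_dist, and allreduce_grads / DistOptimizerHook are importable from the patched dist_utils module; the model, optimizer, and the 256 MB bucket size are illustrative):

# Hypothetical manual training step: only the allreduce_grads signature
# (model, coalesce, bucket_size_mb) comes from the diff above.
def train_step(model, optimizer, loss_fn, data, target):
    optimizer.zero_grad()
    loss = loss_fn(model(data), target)
    loss.backward()
    # average gradients across ranks in ~256 MB chunks instead of one
    # bucket per dtype; bucket_size_mb=-1 keeps the previous behaviour
    allreduce_grads(model, coalesce=True, bucket_size_mb=256)
    optimizer.step()
    return loss.item()


# The same knob exposed through the hook, e.g. as an optimizer config for
# mmcv's Runner (the grad_clip values are only examples; they are forwarded
# by clip_grads to clip_grad_norm_):
optimizer_config = DistOptimizerHook(
    grad_clip=dict(max_norm=35, norm_type=2),
    coalesce=True,
    bucket_size_mb=256)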