import platform
import random
from functools import partial

import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from torch.utils.data import DataLoader

from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler

if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))


def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     seed=None,
                     **kwargs):
    """Build a PyTorch DataLoader with (distributed) group samplers.

    In distributed training each process gets its own DataLoader, so
    ``samples_per_gpu`` and ``workers_per_gpu`` are used per GPU; otherwise a
    single DataLoader serves all ``num_gpus`` GPUs.
    """
    # Pop 'shuffle' so it is not forwarded to DataLoader together with a
    # sampler; the two options are mutually exclusive.
    shuffle = kwargs.pop('shuffle', True)
    rank, world_size = get_dist_info()
    if dist:
        if shuffle:
            sampler = DistributedGroupSampler(dataset, samples_per_gpu,
                                              world_size, rank)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False)
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    # DataLoader calls worker_init_fn(worker_id), so bind the remaining
    # arguments here; each worker then derives a distinct, reproducible seed.
    init_fn = partial(
        worker_init_fn, num_workers=num_workers, rank=rank,
        seed=seed) if seed is not None else None

    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=False,
        worker_init_fn=init_fn,
        **kwargs)

    return data_loader


def worker_init_fn(worker_id, num_workers, rank, seed):
    # The seed of each worker equals num_workers * rank + worker_id + seed,
    # so no two workers across processes share the same seed.
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
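

# Usage sketch (illustrative only, not part of the module): builds a
# non-distributed loader for an mmdet-style dataset. The config object `cfg`
# and the numeric values below are assumptions for demonstration.
#
#   from mmdet.datasets import build_dataset
#
#   dataset = build_dataset(cfg.data.train)
#   data_loader = build_dataloader(
#       dataset,
#       samples_per_gpu=2,
#       workers_per_gpu=2,
#       num_gpus=1,
#       dist=False,
#       seed=42)
#   for i, data_batch in enumerate(data_loader):
#       ...  # training step on the collated batch goes here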