initializer_model.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.distributed as dist

from colossalai.legacy.registry import DIST_GROUP_INITIALIZER

from ..parallel_mode import ParallelMode
from .process_group_initializer import ProcessGroupInitializer


@DIST_GROUP_INITIALIZER.register_module
class Initializer_Model(ProcessGroupInitializer):
    """A ProcessGroupInitializer for model parallelism (model parallel group contains pipeline and tensor parallel
    groups).

    Args:
        rank (int): The rank of the current process.
        world_size (int): Size of the whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of the data parallel dimension.
        pipeline_parallel_size (int): Size of the pipeline parallel dimension.
        tensor_parallel_size (int): Size of the tensor parallel dimension.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
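        # The model parallel group spans the full tensor x pipeline grid, so
        # there is exactly one model parallel group per data parallel replica.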
        self.model_parallel_size = self.tensor_parallel_size * self.pipeline_parallel_size
        self.num_group = self.world_size // self.model_parallel_size

    def init_dist_group(self):
        """Initialize model parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode):
                The model parallel group's information in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.MODEL

        for i in range(self.num_group):
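            # Every rank must enter dist.new_group below (it is a collective
            # call). Each group takes a contiguous block of model_parallel_size
            # ranks, e.g. with model_parallel_size = 4: [0, 1, 2, 3], [4, 5, 6, 7], ...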
            ranks = [i * self.model_parallel_size + j for j in range(self.model_parallel_size)]
            group = dist.new_group(ranks)
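            # NCCL process groups only support CUDA tensors, so build a matching
            # gloo group for CPU communication (reuse this group if the default
            # backend is already gloo).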
            group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

            if self.rank in ranks:
                local_rank = ranks.index(self.rank)
                group_world_size = len(ranks)
                process_group = group
                cpu_group = group_cpu
                ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode
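
# A minimal usage sketch (hypothetical values, not part of this module): it
# assumes torch.distributed is already initialized and that the parent
# ProcessGroupInitializer takes the arguments documented above, e.g. 8 ranks
# split as data=2 x pipeline=2 x tensor=2:
#
#   initializer = Initializer_Model(rank, world_size, config,
#                                   data_parallel_size=2,
#                                   pipeline_parallel_size=2,
#                                   tensor_parallel_size=2)
#   local_rank, group_world_size, group, cpu_group, ranks, mode = initializer.init_dist_group()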