colossalai.py
import functools
import warnings
from typing import Optional

import torch
import torch.distributed as dist
import torch.nn as nn
from transformers.tokenization_utils_base import PreTrainedTokenizerBase

import colossalai
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin
from colossalai.booster.plugin.gemini_plugin import GeminiModel
from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel
from colossalai.tensor import ProcessGroup, ShardSpec
from colossalai.utils import get_current_device
from colossalai.zero import ColoInitContext
from colossalai.zero.gemini.gemini_ddp import GeminiDDP

from .ddp import DDPStrategy


class ColossalAIStrategy(DDPStrategy):
    """
        The strategy for training with ColossalAI.

    Args:
        stage(int): The stage to use in ZeRO. Choose from (1, 2, 3).
        precision(str): The precision to use. Choose from ('fp32', 'fp16'). Stage 3 only supports fp16.
        seed(int): The seed for the random number generator.
        shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.
            This is not compatible with `from_pretrained()` yet; it will be supported in the future.
        placement_policy(str): The placement policy for Gemini. Choose from ('cpu', 'cuda').
                          If it is 'cpu', parameters, gradients and optimizer states will be offloaded to the CPU;
                          if it is 'cuda', they will not be offloaded, which means maximum CUDA memory is used. This is the fastest option.
        pin_memory(bool): Whether to pin the CPU memory used for offloaded tensors. Only for ZeRO-3.
        force_outputs_fp32(bool): Whether to force the outputs to be fp32. Only for ZeRO-3.
        search_range_mb(int): The search range in MB for the chunk size. Only for ZeRO-3.
        hidden_dim(int, optional): The hidden dimension of the model, used to speed up the chunk size search. Only for ZeRO-3.
        min_chunk_size_mb(float): The minimum chunk size in MB. Only for ZeRO-3.
        gpu_margin_mem_ratio(float): The ratio of free GPU memory that the optimizer may use. Only for ZeRO-3.
        reduce_bucket_size(int): The reduce bucket size in bytes. Only for ZeRO-1 and ZeRO-2.
        overlap_communication(bool): Whether to overlap communication and computation. Only for ZeRO-1 and ZeRO-2.
        initial_scale(float): The initial loss scale for the optimizer.
        growth_factor(float): The loss-scale growth factor for the optimizer.
        backoff_factor(float): The loss-scale backoff factor for the optimizer.
        growth_interval(int): The loss-scale growth interval for the optimizer.
        hysteresis(int): The loss-scale hysteresis for the optimizer.
        min_scale(float): The minimum loss scale for the optimizer.
        max_scale(float): The maximum loss scale for the optimizer.
        max_norm(float): The maximum norm for gradient clipping.
        norm_type(float): The norm type for gradient clipping.

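    Example:
        A minimal usage sketch. ``MyModel`` is a placeholder module, the script is
        assumed to be launched with ``torchrun``, and ``prepare()`` comes from the
        base strategy::

            strategy = ColossalAIStrategy(stage=2, precision='fp16', seed=42)
            with strategy.model_init_context():
                model = MyModel()    # placeholder nn.Module
            optimizer = torch.optim.Adam(model.parameters())
            (model, optimizer) = strategy.prepare((model, optimizer))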
    """

    def __init__(
            self,
            stage: int = 3,
            precision: str = 'fp16',
            seed: int = 42,
            shard_init: bool = False,    # only for stage 3
            placement_policy: str = 'cuda',
            pin_memory: bool = True,    # only for stage 3
            force_outputs_fp32: bool = False,    # only for stage 3
            search_range_mb: int = 32,    # only for stage 3
            hidden_dim: Optional[int] = None,    # only for stage 3
            min_chunk_size_mb: float = 32,    # only for stage 3
            gpu_margin_mem_ratio: float = 0.0,    # only for stage 3
            reduce_bucket_size: int = 12 * 1024**2,    # only for stage 1&2
            overlap_communication: bool = True,    # only for stage 1&2
            initial_scale: float = 2**16,
            growth_factor: float = 2,
            backoff_factor: float = 0.5,
            growth_interval: int = 1000,
            hysteresis: int = 2,
            min_scale: float = 1,
            max_scale: float = 2**32,
            max_norm: float = 0.0,
            norm_type: float = 2.0) -> None:

        assert stage in (1, 2, 3), f'Unsupported stage "{stage}"'
        assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy "{placement_policy}"'
        assert precision in ('fp32', 'fp16'), f'Unsupported precision "{precision}"'

        # TODO(ver217): support shard_init when using from_pretrained()
        if shard_init:
            warnings.warn(
                'Shard init is not supported with model.from_pretrained() yet. '
                'Please load weights after strategy.prepare().'
            )
        if stage == 3 and precision == 'fp32':
            warnings.warn('Stage 3 only supports fp16. Precision is set to fp16.')
            precision = 'fp16'
        self.precision = precision
        self.shard_init = shard_init

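        # Loss-scaling and gradient-clipping settings forwarded to both plugins' optimizer configs.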
        optim_kwargs = dict(
            initial_scale=initial_scale,
            growth_factor=growth_factor,
            backoff_factor=backoff_factor,
            growth_interval=growth_interval,
            hysteresis=hysteresis,
            min_scale=min_scale,
            max_scale=max_scale,
            max_norm=max_norm,
            norm_type=norm_type
        )
        # NOTE: dist should be initialized before calling get_current_device()
        if stage == 3:
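            # ZeRO-3: the GeminiPlugin manages parameters and gradients in chunks and
            # places them on CPU or CUDA according to `placement_policy`.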
            plugin_initializer = lambda: GeminiPlugin(
                # gemini_config
                device=get_current_device(),
                placement_policy=placement_policy,
                precision=precision,
                pin_memory=pin_memory,
                force_outputs_fp32=force_outputs_fp32,
                strict_ddp_mode=shard_init,
                search_range_mb=search_range_mb,
                hidden_dim=hidden_dim,
                min_chunk_size_mb=min_chunk_size_mb,
                # zero_optim_config
                gpu_margin_mem_ratio=gpu_margin_mem_ratio,
                # optim_config
                **optim_kwargs
            )
        else:
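            # ZeRO-1/2: the LowLevelZeroPlugin shards optimizer states (and gradients for
            # stage 2); CPU offload is enabled when `placement_policy` is 'cpu'.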
            plugin_initializer = lambda: LowLevelZeroPlugin(
                # zero_config
                stage=stage,
                precision=precision,
                # zero_optim_config
                reduce_bucket_size_in_m=reduce_bucket_size,
                overlap_communication=overlap_communication,
                cpu_offload=(placement_policy == 'cpu'),
                # optim_config
                **optim_kwargs
            )

        super().__init__(seed, plugin_initializer)

    def _post_init(self) -> None:
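        # The plugin is created by the base class from `plugin_initializer`;
        # verify it is one of the two supported ColossalAI plugins.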
        assert isinstance(self.plugin, (LowLevelZeroPlugin, GeminiPlugin)), \
            f'{type(self).__name__}\'s plugin is not initialized properly.'

    def setup_distributed(self) -> None:
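        # Initialize the ColossalAI runtime from the environment variables set by the
        # torch launcher (torchrun); no extra config is needed here.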
        colossalai.launch_from_torch({}, seed=self.seed)

    def model_init_context(self):
        if isinstance(self.plugin, GeminiPlugin):
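            # When shard_init is enabled, parameters are created already sharded across
            # all ranks: ShardSpec([-1], [world_size]) splits the last dimension over a
            # process group spanning the whole world.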
            world_size = dist.get_world_size()
            shard_pg = ProcessGroup(tp_degree=world_size) if self.shard_init else None
            default_dist_spec = ShardSpec([-1], [world_size]) if self.shard_init else None
            return ColoInitContext(device=get_current_device(),
                                   dtype=torch.half,
                                   default_pg=shard_pg,
                                   default_dist_spec=default_dist_spec)
        return super().model_init_context()

    def unwrap_model(self, model: nn.Module) -> nn.Module:
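        # The booster wraps the model in a plugin-specific wrapper; peel it off to get
        # the underlying nn.Module (GeminiModel -> GeminiDDP -> module for ZeRO-3).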
        if isinstance(self.plugin, GeminiPlugin):
            assert isinstance(model, GeminiModel)
            ddp_model = model.unwrap()
            assert isinstance(ddp_model, GeminiDDP)
            return ddp_model.module
        elif isinstance(self.plugin, LowLevelZeroPlugin):
            assert isinstance(model, LowLevelZeroModel)
            return model.module
        else:
            raise RuntimeError(f'Unsupported plugin {type(self.plugin)}')

    def save_pretrained(self,
                        model: nn.Module,
                        path: str,
                        only_rank0: bool = True,
                        tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None:
        if isinstance(self.plugin, GeminiPlugin):
            raise RuntimeError('ColossalAI strategy with stage 3 does not support save_pretrained() yet')
        super().save_pretrained(model, path, only_rank0, tokenizer)

    def get_model_state_dict_shard(self, model: nn.Module, **config):
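        # Yield the state dict in shards to bound peak memory while saving; Gemini
        # (ZeRO-3) exposes its own sharded state-dict API on GeminiDDP.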
        if not isinstance(self.plugin, GeminiPlugin):
            yield from super().get_model_state_dict_shard(model, **config)
        else:
            # unwrapped_model = self._unwrap_model(model)
            # for module in unwrapped_model.modules():
            #     if isinstance(module, LoraLinear):
            #         module.merge_weights = True
            #         module.eval()
            assert isinstance(model, GeminiModel)
            ddp_model = model.unwrap()
            assert isinstance(ddp_model, GeminiDDP)
            yield from ddp_model.state_dict_shard(max_shard_size=1024, only_rank_0=False)