import warnings
from typing import Optional

import torch
import torch.distributed as dist
import torch.nn as nn
from transformers.tokenization_utils_base import PreTrainedTokenizerBase

import colossalai
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin
from colossalai.booster.plugin.gemini_plugin import GeminiModel
from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel
from colossalai.tensor import ProcessGroup, ShardSpec
from colossalai.utils import get_current_device
from colossalai.zero import ColoInitContext
from colossalai.zero.gemini.gemini_ddp import GeminiDDP

from .ddp import DDPStrategy


class ColossalAIStrategy(DDPStrategy):
    """
        The strategy for training with ColossalAI.

    Args:
        stage(int): The stage to use in ZeRO. Choose from (1, 2, 3).
        precision(str): The precision to use. Choose from ('fp32', 'fp16'). Stage 3 only supports fp16.
        seed(int): The seed for the random number generator.
        shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.
            This is not compatible with `from_pretrained()`; it is temporarily disabled and will be supported in the future.
        placement_policy(str): The placement policy for Gemini. Choose from ('cpu', 'cuda').
            If it is 'cpu', parameters, gradients and optimizer states will be offloaded to CPU.
            If it is 'cuda', they will not be offloaded, which means maximum CUDA memory will be used. It is the fastest.
        pin_memory(bool): Whether to pin CPU memory for offloaded tensors. Only for ZeRO-3.
        force_outputs_fp32(bool): Whether to force the outputs to be fp32. Only for ZeRO-3.
        search_range_m(int): The search range for the chunk size, divided by 2^20. Only for ZeRO-3.
        hidden_dim(int, optional): The hidden dimension of the model, used by Gemini when searching the chunk size. Only for ZeRO-3.
        min_chunk_size_m(float): The minimum chunk size divided by 2^20. Only for ZeRO-3.
        gpu_margin_mem_ratio(float): The margin memory ratio for the GPU. Only for ZeRO-3.
        reduce_bucket_size(int): The reduce bucket size in bytes. Only for ZeRO-1 and ZeRO-2.
        overlap_communication(bool): Whether to overlap communication and computation. Only for ZeRO-1 and ZeRO-2.
        initial_scale(float): The initial scale for the optimizer.
        growth_factor(float): The growth factor for the optimizer.
        backoff_factor(float): The backoff factor for the optimizer.
        growth_interval(int): The growth interval for the optimizer.
        hysteresis(int): The hysteresis for the optimizer.
        min_scale(float): The minimum scale for the optimizer.
        max_scale(float): The maximum scale for the optimizer.
        max_norm(float): The maximum norm for the optimizer.
        norm_type(float): The norm type for the optimizer.
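
    Example:
        A minimal, illustrative usage sketch. The ``build_model`` helper below is
        hypothetical; when ``shard_init=True``, pretrained weights should be loaded
        after ``strategy.prepare()``::

            strategy = ColossalAIStrategy(stage=3, placement_policy='cuda')
            with strategy.model_init_context():
                model = build_model()    # hypothetical factory returning an nn.Module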

    """

    def __init__(
            self,
            stage: int = 3,
            precision: str = 'fp16',
            seed: int = 42,
            shard_init: bool = False,    # only for stage 3
            placement_policy: str = 'cuda',
            pin_memory: bool = True,    # only for stage 3
            force_outputs_fp32: bool = False,    # only for stage 3
            search_range_m: int = 32,    # only for stage 3
            hidden_dim: Optional[int] = None,    # only for stage 3
            min_chunk_size_m: float = 32,    # only for stage 3
            gpu_margin_mem_ratio: float = 0.0,    # only for stage 3
            reduce_bucket_size: int = 12 * 1024**2,    # only for stage 1&2
            overlap_communication: bool = True,    # only for stage 1&2
            initial_scale: float = 2**16,
            growth_factor: float = 2,
            backoff_factor: float = 0.5,
            growth_interval: int = 1000,
            hysteresis: int = 2,
            min_scale: float = 1,
            max_scale: float = 2**32,
            max_norm: float = 0.0,
            norm_type: float = 2.0) -> None:

        assert stage in (1, 2, 3), f'Unsupported stage "{stage}"'
        assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy "{placement_policy}"'
        assert precision in ('fp32', 'fp16'), f'Unsupported precision "{precision}"'

        # TODO(ver217): support shard_init when using from_pretrained()
        if shard_init:
            warnings.warn('Shard init is not supported with model.from_pretrained() yet. '
                          'Please load weights after strategy.prepare().')
        if stage == 3 and precision == 'fp32':
            warnings.warn('Stage 3 only supports fp16. Precision is set to fp16.')
            precision = 'fp16'
        self.precision = precision
        self.shard_init = shard_init

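        # Dynamic loss-scaling and gradient-clipping settings shared by both plugins' optimizers.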
        optim_kwargs = dict(initial_scale=initial_scale,
                            growth_factor=growth_factor,
                            backoff_factor=backoff_factor,
                            growth_interval=growth_interval,
                            hysteresis=hysteresis,
                            min_scale=min_scale,
                            max_scale=max_scale,
                            max_norm=max_norm,
                            norm_type=norm_type)
        # NOTE: dist should be initialized before calling get_current_device()
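        # Stage 3 uses the Gemini (chunk-based ZeRO-3) plugin; stages 1 and 2 use the
        # low-level ZeRO plugin. Both share the optimizer kwargs above.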
        if stage == 3:
            plugin_initializer = lambda: GeminiPlugin(
                # gemini_config
                device=get_current_device(),
                placement_policy=placement_policy,
                precision=precision,
                pin_memory=pin_memory,
                force_outputs_fp32=force_outputs_fp32,
                strict_ddp_mode=shard_init,
                search_range_m=search_range_m,
                hidden_dim=hidden_dim,
                min_chunk_size_m=min_chunk_size_m,
                # zero_optim_config
                gpu_margin_mem_ratio=gpu_margin_mem_ratio,
                # optim_config
                **optim_kwargs)
        else:
            plugin_initializer = lambda: LowLevelZeroPlugin(
                # zero_config
                stage=stage,
                precision=precision,
                # zero_optim_config
                reduce_bucket_size_in_m=reduce_bucket_size,
                overlap_communication=overlap_communication,
                cpu_offload=(placement_policy == 'cpu'),
                # optim_config
                **optim_kwargs)

        super().__init__(seed, plugin_initializer)

    def _post_init(self) -> None:
        assert isinstance(self.plugin, (LowLevelZeroPlugin, GeminiPlugin)), \
            f'{type(self).__name__}\'s plugin is not initialized properly.'

    def setup_distributed(self) -> None:
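        # launch_from_torch reads the rank, world size and master address/port from the
        # environment variables set by torchrun / torch.distributed.launch.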
        colossalai.launch_from_torch({}, seed=self.seed)

    def model_init_context(self):
        if isinstance(self.plugin, GeminiPlugin):
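            # With shard_init enabled, parameters are created directly in a sharded layout:
            # the last dimension of each tensor is split across all ranks via ShardSpec.
            # Otherwise ColoInitContext only places the fp16 parameters on the current device.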
            world_size = dist.get_world_size()
            shard_pg = ProcessGroup(tp_degree=world_size) if self.shard_init else None
            default_dist_spec = ShardSpec([-1], [world_size]) if self.shard_init else None
            return ColoInitContext(device=get_current_device(),
                                   dtype=torch.half,
                                   default_pg=shard_pg,
                                   default_dist_spec=default_dist_spec)
        return super().model_init_context()

    def unwrap_model(self, model: nn.Module) -> nn.Module:
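        # Peel off the booster wrappers to recover the user's original nn.Module:
        # GeminiPlugin wraps it as GeminiModel -> GeminiDDP -> module, while
        # LowLevelZeroPlugin wraps it as LowLevelZeroModel -> module.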
        if isinstance(self.plugin, GeminiPlugin):
            assert isinstance(model, GeminiModel)
            ddp_model = model.unwrap()
            assert isinstance(ddp_model, GeminiDDP)
            return ddp_model.module
        elif isinstance(self.plugin, LowLevelZeroPlugin):
            assert isinstance(model, LowLevelZeroModel)
            return model.module
        else:
            raise RuntimeError(f'Unsupported plugin {type(self.plugin)}')

    def save_pretrained(self,
                        model: nn.Module,
                        path: str,
                        only_rank0: bool = True,
                        tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None:
        if isinstance(self.plugin, GeminiPlugin):
            raise RuntimeError('ColossalAI strategy with stage 3 does not support save_pretrained() yet')
        super().save_pretrained(model, path, only_rank0, tokenizer)

    def get_model_state_dict_shard(self, model: nn.Module, **config):
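        # Yield the state dict in shards so that no single rank has to materialize the
        # full model state dict in memory at once.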
        if not isinstance(self.plugin, GeminiPlugin):
            yield from super().get_model_state_dict_shard(model, **config)
        else:
            # unwrapped_model = self._unwrap_model(model)
            # for module in unwrapped_model.modules():
            #     if isinstance(module, LoraLinear):
            #         module.merge_weights = True
            #         module.eval()
            assert isinstance(model, GeminiModel)
            ddp_model = model.unwrap()
            assert isinstance(ddp_model, GeminiDDP)
            yield from ddp_model.state_dict_shard(max_shard_size=1024, only_rank_0=False)