# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import os
from functools import partial

from parameterized import parameterized

import tests.trainer.test_trainer
from tests.trainer.test_trainer import TrainerIntegrationCommon  # noqa
from transformers import is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_accelerate,
    require_fsdp,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import FSDPOption, set_seed
from transformers.utils import is_accelerate_available, is_torch_bf16_gpu_available


# default torch.distributed port
DEFAULT_MASTER_PORT = "10999"
dtypes = ["fp16"]
if is_torch_bf16_gpu_available():
    dtypes += ["bf16"]
sharding_strategies = ["full_shard", "shard_grad_op"]
state_dict_types = ["FULL_STATE_DICT", "SHARDED_STATE_DICT"]
set_seed(42)
params = list(itertools.product(sharding_strategies, dtypes))
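# illustrative note (not executed): on a bf16-capable gpu the sub-tests are parameterized as
# [("full_shard", "fp16"), ("full_shard", "bf16"), ("shard_grad_op", "fp16"), ("shard_grad_op", "bf16")]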


def get_master_port(real_launcher=False):
    """
    When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed)
    the issue is that once the port is tied it can't be used anywhere else outside of this process,
    since torch.dist doesn't free the port until the process exits. Therefore for the sake of being
    able to run both emulated launcher and normal launcher tests we need 2 distinct ports.

    This function will give the right port in the right context. For real launcher it'll give the
    base port, for emulated launcher it'll give the base port + 1. In both cases a string is
    returned.

    Args:
        `real_launcher`: whether a real launcher is going to be used, or the emulated one

    """

    master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT)
    if not real_launcher:
        master_port_base = str(int(master_port_base) + 1)
    return master_port_base
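

# illustrative note (not executed): with DS_TEST_PORT unset,
# get_master_port(real_launcher=True) -> "10999" and get_master_port(real_launcher=False) -> "11000"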


if is_torch_available():
    from tests.trainer.test_trainer import (  # noqa
        RegressionModelConfig,
        RegressionPreTrainedModel,
    )

    # hack to restore original logging level pre #21700
    get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info")

if is_accelerate_available():
    from accelerate.utils.constants import (
        FSDP_PYTORCH_VERSION,
        FSDP_SHARDING_STRATEGY,
    )

    require_fsdp_version = partial(require_fsdp, min_version=FSDP_PYTORCH_VERSION)


def get_launcher(distributed=False, use_accelerate=False):
    # 1. explicitly set --nnodes=1 just in case these tests end up being run on a multi-node setup
    #    - it won't be able to handle that
    # 2. for now test with at most 2 gpus (since some quality tests may give different
    #    results with more gpus because we use very little data)
    num_gpus = min(2, get_gpu_count()) if distributed else 1
    master_port = get_master_port(real_launcher=True)
    if use_accelerate:
        return f"""accelerate launch
        --num_processes {num_gpus}
        --main_process_port {master_port}
        --use_fsdp
        --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP
        --fsdp_state_dict_type SHARDED_STATE_DICT
        --fsdp_transformer_layer_cls_to_wrap BertLayer""".split()
    return f"torchrun --nnodes 1 --nproc-per-node {num_gpus} --master-port {master_port}".split()


def _parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function: we want both params to appear in the
    # sub-test name, whereas by default only the first param is shown
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


@require_accelerate
@require_torch_gpu
@require_fsdp_version
class TrainerIntegrationFSDP(TestCasePlus, TrainerIntegrationCommon):
    def setUp(self):
        super().setUp()
        master_port = get_master_port(real_launcher=False)
        self.dist_env_1_gpu = {
            "MASTER_ADDR": "localhost",
            "MASTER_PORT": master_port,
            "RANK": "0",
            "LOCAL_RANK": "0",
            "WORLD_SIZE": "1",
        }

        self.fsdp_config = {
            "backward_prefetch": "backward_pre",
            "forward_prefetch": "False",
            "limit_all_gathers": "False",
            "use_orig_params": "True",
            "sync_module_states": "True",
            "activation_checkpointing": "False",
            "min_num_params": 1,
        }

    def tearDown(self):
        super().tearDown()

    @parameterized.expand(params, name_func=_parameterized_custom_name_func)
    def test_fsdp_config(self, sharding_strategy, dtype):
        output_dir = self.get_auto_remove_tmp_dir()
        kwargs = {
            "output_dir": output_dir,
            "train_len": 128,
            "save_steps": 5,
            "learning_rate": 0.1,
            "fsdp": f"{sharding_strategy} offload auto_wrap",
            "fsdp_config": self.fsdp_config,
        }
        kwargs[dtype] = True
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(**kwargs)
            self.assertEqual(trainer.args.fsdp[0], sharding_strategy)
            self.assertEqual(trainer.args.fsdp[1], FSDPOption.OFFLOAD)
            self.assertEqual(trainer.args.fsdp[2], FSDPOption.AUTO_WRAP)
            for k, v in trainer.args.fsdp_config.items():
                self.assertEqual(v, self.fsdp_config[k])
            self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true")

    @parameterized.expand(params, name_func=_parameterized_custom_name_func)
    @require_torch_multi_gpu
    @slow
    def test_basic_run(self, sharding_strategy, dtype):
        launcher = get_launcher(distributed=True, use_accelerate=False)
        output_dir = self.get_auto_remove_tmp_dir()
        args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}"]
        fsdp_args = ["--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"]
        script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"]
        cmd = launcher + script + args + fsdp_args
        execute_subprocess_async(cmd, env=self.get_env())

    @parameterized.expand(dtypes)
    @require_torch_multi_gpu
    @slow
    def test_basic_run_with_cpu_offload(self, dtype):
        launcher = get_launcher(distributed=True, use_accelerate=False)
        output_dir = self.get_auto_remove_tmp_dir()
        args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}", "--max_steps", "10"]
        fsdp_args = ["--fsdp", "full_shard auto_wrap offload", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"]
        script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"]
        cmd = launcher + script + args + fsdp_args
        execute_subprocess_async(cmd, env=self.get_env())

    @parameterized.expand(state_dict_types, name_func=_parameterized_custom_name_func)
    @require_torch_multi_gpu
    @slow
    def test_training_and_can_resume_normally(self, state_dict_type):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)

        sharding_strategy = "full_shard"
        use_accelerate = state_dict_type == "SHARDED_STATE_DICT"
        launcher = get_launcher(True, use_accelerate=use_accelerate)
        args = self.get_base_args(output_dir, 2, 25).split()
        script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"]
        logs = self.run_cmd_and_get_logs(use_accelerate, sharding_strategy, launcher, script, args, output_dir)

        # resume from ckpt
        checkpoint = os.path.join(output_dir, "checkpoint-115")
        resume_args = args + f"--resume_from_checkpoint {checkpoint}".split()
        logs_resume = self.run_cmd_and_get_logs(
            use_accelerate, sharding_strategy, launcher, script, resume_args, output_dir
        )

        for log, log1 in zip(logs, logs_resume):
            if "learning_rate" in log:
                self.assertAlmostEqual(log["learning_rate"], log1["learning_rate"], delta=1e-5)

    def run_cmd_and_get_logs(self, use_accelerate, sharding_strategy, launcher, script, args, output_dir):
        if not use_accelerate:
            fsdp_args = [
                "--fsdp",
                f"{sharding_strategy} auto_wrap",
                "--fsdp_transformer_layer_cls_to_wrap",
                "BertLayer",
            ]
            cmd = launcher + script + args + fsdp_args
        else:
            fsdp_config = f"""
                --fsdp_sharding_strategy {FSDP_SHARDING_STRATEGY.index(sharding_strategy.upper()) + 1}
            """.split()
            cmd = launcher + fsdp_config + script + args

        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        return logs

    def get_base_args(self, output_dir, num_epochs, logging_steps):
        return f"""
            --model_name_or_path bert-base-cased
            --task_name mrpc
            --output_dir {output_dir}
            --overwrite_output_dir
            --do_train
            --max_seq_length 128
            --per_device_train_batch_size 16
            --learning_rate 5e-5
            --num_train_epochs {num_epochs}
            --lr_scheduler_type cosine
            --logging_steps {logging_steps}
            --save_strategy epoch
            --do_eval
            --evaluation_strategy epoch
            --report_to none
        """