# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch

import contextlib

import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from torch.utils.checkpoint import detach_variable

from megatron.memory import allocate_mem_buff

from .initialize import get_data_parallel_rank
from .initialize import get_tensor_model_parallel_group
from .initialize import get_tensor_model_parallel_rank
from .initialize import get_tensor_model_parallel_world_size


# Default name for the model parallel rng tracker.
36
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
37
38
39
40
41
42
43
44
45
46
47


def _set_cuda_rng_state(new_state, device=-1):
    """Set the random number generator state of the current GPU.

    Arguments:
        new_state (torch.ByteTensor): The desired state
        device: CUDA device (int index, string, torch.device, or -1 for
            the current device).

    This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for +4 GPU cases.
    """
    if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
        # Older PyTorch exposes a direct C binding that acts on the
        # device selected by the device context manager.
        def _apply_state():
            with device_ctx_manager(device):
                _C._cuda_setRNGState(new_state)
    else:
        # Newer PyTorch: normalize `device` into a torch.device first.
        if device == -1:
            target_device = torch.device('cuda')
        elif isinstance(device, str):
            target_device = torch.device(device)
        elif isinstance(device, int):
            target_device = torch.device('cuda', device)
        else:
            # Already a torch.device (or compatible); use as-is.
            target_device = device

        def _apply_state():
            index = target_device.index
            if index is None:
                index = torch.cuda.current_device()
            torch.cuda.default_generators[index].set_state(new_state)

    # Defer until CUDA is initialized (runs immediately if it already is).
    _lazy_call(_apply_state)


def split_tensor_into_1d_equal_chunks(tensor, new_buffer=False):
    """Break a tensor into equal 1D chunks and return this rank's chunk.

    The flattened tensor is divided evenly across the tensor model
    parallel world size.

    Arguments:
        tensor: input tensor to partition.
        new_buffer: if True, copy the chunk into a freshly allocated
            buffer; otherwise return a view into `tensor`.
    """
    chunk_numel = torch.numel(tensor) // \
        get_tensor_model_parallel_world_size()
    chunk_start = chunk_numel * get_tensor_model_parallel_rank()
    flat_chunk = tensor.view(-1)[chunk_start:chunk_start + chunk_numel]
    if not new_buffer:
        return flat_chunk
    # Detach from the original storage by copying into a new buffer.
    out = torch.empty(chunk_numel, dtype=tensor.dtype,
                      device=torch.cuda.current_device(),
                      requires_grad=False)
    out.copy_(flat_chunk)
    return out

def gather_split_1d_tensor(tensor):
    """Opposite of split_tensor_into_1d_equal_chunks: all-gather each
    rank's chunk from the tensor model parallel group into one flat
    tensor."""
    world_size = get_tensor_model_parallel_world_size()
    chunk_numel = torch.numel(tensor)
    gathered = torch.empty(world_size * chunk_numel, dtype=tensor.dtype,
                           device=torch.cuda.current_device(),
                           requires_grad=False)
    # One non-overlapping slice per rank; all_gather fills them in place.
    per_rank_views = [gathered[rank * chunk_numel:(rank + 1) * chunk_numel]
                      for rank in range(world_size)]
    torch.distributed.all_gather(per_rank_views, tensor,
                                 group=get_tensor_model_parallel_group())
    return gathered

class MakeViewlessTensor_(torch.autograd.Function):
    """Autograd-transparent conversion of a view tensor into a tensor
    whose `._base` is None. Storage is shared with the input (no data
    copy); gradients pass straight through."""

    @staticmethod
    def forward(ctx, inp):
        # Only views should reach here; non-views are already viewless.
        assert inp._base is not None
        # Allocate a throwaway 1-element tensor, then repoint its .data
        # at the input's storage; the result has ._base is None.
        viewless = torch.empty((1,), dtype=inp.dtype, device=inp.device)
        viewless.data = inp.data
        return viewless

    @staticmethod
    def backward(ctx, grad_output):
        # Identity op: gradient flows through unchanged.
        return grad_output

def make_viewless_tensor(tensor):
    """Return `tensor` unchanged if it is not a view; otherwise return an
    equivalent tensor (sharing storage) whose `._base` is None."""
    if tensor._base is None:
        # Already viewless; nothing to do.
        return tensor
    return MakeViewlessTensor_.apply(tensor)

def assert_viewless_tensor(tensor):
    """Assert that `tensor` is not a view (i.e. `tensor._base is None`).

    Storing a view somewhere long-lived -- e.g. assigning it to another
    tensor's `.data` or saving it to a memory buffer -- keeps the entire
    `._base` storage alive, causing a memory leak that likely accumulates
    over iterations.

    Lists are checked recursively; non-tensor inputs are ignored.
    """
    if isinstance(tensor, list):
        for item in tensor:
            assert_viewless_tensor(item)
        return
    if not isinstance(tensor, torch.Tensor):
        return
    # Bug fix: the original message referenced the undefined name
    # 'new_data_tensor', so a failing check raised NameError instead of
    # AssertionError. Report the shapes we actually have in scope.
    assert tensor._base is None, (
        "Ensure tensor._base is None before setting tensor.data or storing "
        "tensor to memory buffer. Otherwise, a memory leak will occur (and "
        "likely accumulate over iterations). FYI, tensor._base has shape "
        "%s, and the tensor has shape %s."
    ) % (tensor._base.shape, tensor.shape)

def safely_set_tensor_data_attr(tensor, new_data_tensor):
    """Replace `tensor.data` with `new_data_tensor`, first asserting that
    `tensor` is not a view (setting .data on a view would keep its
    `._base` storage alive and leak memory)."""
    assert_viewless_tensor(tensor)
    tensor.data = new_data_tensor

class CudaRNGStatesTracker:
    """Tracker for the cuda RNG states.

    Using the `add` method, a cuda rng state is initialized based on
    the input `seed` and is assigned to `name`. Later, by forking the
    rng state, we can perform operations and return to our starting
    cuda state.
    """

    def __init__(self):
        # Map: name -> cuda rng state.
        self.states_ = {}
        # Seeds are kept only for bookkeeping, to reject duplicates.
        self.seeds_ = set()

    def reset(self):
        """Drop all tracked states and seeds (back to the initial,
        empty tracker)."""
        self.states_ = {}
        self.seeds_ = set()

    def get_states(self):
        """Return a shallow copy of the name->state map, so callers hold
        direct references to the states rather than to our dict."""
        return dict(self.states_)

    def set_states(self, states):
        """Install `states` as the tracked map. For efficiency purposes,
        no compatibility checking is performed."""
        self.states_ = states

    def add(self, name, seed):
        """Seed a new cuda rng state and track it under `name`."""
        # Reject duplicate seeds and duplicate names.
        if seed in self.seeds_:
            raise Exception('seed {} already exists'.format(seed))
        self.seeds_.add(seed)
        if name in self.states_:
            raise Exception('cuda rng state {} already exists'.format(name))
        # Seed a fresh state, capture it, then restore the state that was
        # active on entry.
        orig_rng_state = torch.cuda.get_rng_state()
        torch.cuda.manual_seed(seed)
        self.states_[name] = torch.cuda.get_rng_state()
        _set_cuda_rng_state(orig_rng_state)

    @contextlib.contextmanager
    def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
        """Context manager: run the body under the tracked state `name`,
        then restore the state that was active on entry."""
        if name not in self.states_:
            raise Exception('cuda rng state {} is not added'.format(name))
        before_fork_state = torch.cuda.get_rng_state()
        _set_cuda_rng_state(self.states_[name])
        try:
            yield
        finally:
            # Persist any rng progress made inside the fork ...
            self.states_[name] = torch.cuda.get_rng_state()
            # ... and restore the pre-fork state.
            _set_cuda_rng_state(before_fork_state)


# Module-level singleton RNG tracker; access it via get_cuda_rng_tracker().
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()


def get_cuda_rng_tracker():
    """Get the singleton cuda rng tracker."""
    return _CUDA_RNG_STATE_TRACKER


def model_parallel_cuda_manual_seed(seed):
    """Initialize model parallel cuda seed.

    Call this after model parallel is initialized; afterwards no
    torch.cuda.manual_seed should be called -- this function replaces it.
    Two sets of RNG states are tracked:
        default state: the same among a set of model parallel GPUs but
            different across model parallel groups. Used for example for
            dropout in the non-tensor-model-parallel regions.
        tensor-model-parallel state: different among a set of model
            parallel GPUs, but the same across data parallel groups.
            Used for example for dropout in model parallel regions.
    """
    # 2718 is just for fun and any POSITIVE value will work.
    tensor_model_parallel_seed = seed + 2718 + \
        get_tensor_model_parallel_rank()
    # Data parallel gets the original seed.
    data_parallel_seed = seed

    if torch.distributed.get_rank() == 0:
        print('> initializing model parallel cuda seeds on global rank {}, '
              'model parallel rank {}, and data parallel rank {} with '
              'model parallel seed: {} and data parallel seed: {}'.format(
                  torch.distributed.get_rank(),
                  get_tensor_model_parallel_rank(),
                  get_data_parallel_rank(),
                  tensor_model_parallel_seed,
                  data_parallel_seed), flush=True)

    _CUDA_RNG_STATE_TRACKER.reset()
    # Default state: seeded directly with the data parallel seed.
    torch.cuda.manual_seed(data_parallel_seed)
    # Tensor-model-parallel state: tracked separately under its own name.
    _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME,
                                tensor_model_parallel_seed)


class CheckpointFunction(torch.autograd.Function):
    """This function is adapted from torch.utils.checkpoint with
       two main changes:
           1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
           2) the states in the model parallel tracker are also properly
              tracked/set/reset.
    """

    @staticmethod
    def forward(ctx, run_function, distribute_checkpointed_activations, *args):
        ctx.run_function = run_function
        ctx.distribute_checkpointed_activations = \
            distribute_checkpointed_activations

        # Snapshot every rng state so backward() can replay the forward
        # pass exactly during recomputation.
        ctx.fwd_cpu_rng_state = torch.get_rng_state()
        ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
        ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        # Run the wrapped computation without building an autograd graph.
        with torch.no_grad():
            outputs = run_function(*args)

        if distribute_checkpointed_activations:
            # Divide hidden states across the model parallel group and
            # keep only the chunk corresponding to the current rank.
            ctx.input_0_shape = args[0].data.shape
            safely_set_tensor_data_attr(
                args[0],
                split_tensor_into_1d_equal_chunks(args[0].data,
                                                  new_buffer=True))

        # Store everything needed for recomputation.
        ctx.save_for_backward(*args)
        return outputs

    @staticmethod
    def backward(ctx, *args):
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError("Checkpointing is not compatible with .grad(), "
                               "please use .backward() if possible")
        inputs = ctx.saved_tensors
        if ctx.distribute_checkpointed_activations:
            # Reassemble the full first input from all ranks, then
            # restore its original shape.
            safely_set_tensor_data_attr(
                inputs[0], gather_split_1d_tensor(inputs[0].data))
            safely_set_tensor_data_attr(
                inputs[0], inputs[0].data.view(ctx.input_0_shape))

        # Preserve the current rng states ...
        cpu_rng_state_snapshot = torch.get_rng_state()
        cuda_rng_state_snapshot = torch.cuda.get_rng_state()
        tracker_snapshot = get_cuda_rng_tracker().get_states()

        # ... and rewind to the states captured before the forward pass.
        torch.set_rng_state(ctx.fwd_cpu_rng_state)
        _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)

        # Recompute the forward pass, this time tracking gradients.
        detached_inputs = detach_variable(inputs)
        with torch.enable_grad():
            outputs = ctx.run_function(*detached_inputs)

        # Restore the rng states from the start of this function.
        torch.set_rng_state(cpu_rng_state_snapshot)
        _set_cuda_rng_state(cuda_rng_state_snapshot)
        get_cuda_rng_tracker().set_states(tracker_snapshot)

        if isinstance(outputs, torch.Tensor):
            outputs = (outputs,)
        torch.autograd.backward(outputs, args)

        grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp
                      for inp in detached_inputs)
        # The two leading Nones correspond to forward()'s run_function
        # and distribute_checkpointed_activations arguments.
        return (None, None) + grads


def checkpoint(function, distribute_checkpointed_activations, *args):
    """Checkpoint a model or part of the model.
    This has been directly copied from torch.utils.checkpoint."""
    # Thin wrapper over the custom autograd Function above.
    return CheckpointFunction.apply(
        function, distribute_checkpointed_activations, *args)