# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""Functionality for CPU offloading of tensors saved for backward pass."""
from __future__ import annotations
from contextlib import nullcontext
from typing import Any, Dict, Optional

import torch

from transformer_engine.debug.pytorch.debug_state import TEDebugState
from .tensor.quantized_tensor import QuantizedTensorStorage
from .tensor.float8_tensor import Float8Tensor

__all__ = ["get_cpu_offload_context"]

CPUOffloadEnabled = False
CPUOffloadedLayer = False


def get_cpu_offloading():
    """Return whether CPU offloading is currently enabled."""
    global CPUOffloadEnabled
    return CPUOffloadEnabled


def set_cpu_offloading(cpu_offloading):
    """Enable or disable CPU offloading globally."""
    global CPUOffloadEnabled
    CPUOffloadEnabled = cpu_offloading


def mark_activation_offload(*tensors):
    """Mark tensors (or their underlying data tensors) for activation offloading to CPU."""
    if TEDebugState.debug_enabled:
        raise RuntimeError("CPU offload is not supported in debug mode.")

    for tensor in tensors:
        if tensor is None:
            continue
        if type(tensor) in [torch.Tensor, torch.nn.Parameter]:
            tensor.activation_offloading = True
        else:
            data_tensors = tensor.get_data_tensors()
            for data_tensor in data_tensors:
                if data_tensor is not None:
                    data_tensor.activation_offloading = True
                    # This is a hack to force clear the tensor after it is offloaded.
                    # It is needed, because .*TensorStorage classes are saved in the ctx,
                    # and they contain the reference to their data tensors.
                    data_tensor.needs_force_clear = True
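
# Illustrative usage (sketch; `inp` and `weight` are hypothetical tensor names):
# a layer marks the tensors it is about to save so that the offload handler's
# checker (`hasattr(tensor, "activation_offloading")`) will pick them up later:
#
#     mark_activation_offload(inp, weight)
#     ctx.save_for_backward(inp, weight)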


def is_cpu_offload_enabled() -> bool:
    """Check if CPU offloading is currently enabled."""
    return CPUOffloadEnabled


class CpuOffloadSavedTensorHook:
    """Context-manager that executes a pair of pack/unpack hooks for saved tensors.

    In this context, the ``on_save_for_backward`` method will be called every time
    a tensor is saved for backward (this includes intermediary results saved using
    :func:`~torch.autograd.function._ContextMethodMixin.save_for_backward` but
    also those recorded by a PyTorch-defined operation).

    The ``on_get_saved_tensor`` method will be called when the backward function
    of this op attempts to retrieve the saved tensor from context (this includes
    :func:`torch.Tensor.backward()` or :func:`torch.autograd.grad()`). It takes
    as input the return value of ``on_save_for_backward``, and is meant to return
    an identical copy of the tensor being saved by ``on_save_for_backward`` in terms of
    size, device and element values.

    Example:

        >>> import torch
        >>> from typing import Any
        >>>
        >>> class DummyHook(CpuOffloadSavedTensorHook):
        ...
        ...     def on_save_for_backward(self, tensor: torch.Tensor) -> Any:
        ...         logging.info("On save", tensor)
        ...         return (tensor,)
        ...
        ...     def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor:
        ...         logging.info("On get", saved_state)
        ...         tensor, = saved_state
        ...         return tensor
        ...
        >>> a = torch.ones(5, requires_grad=True)
        >>> b = torch.ones(5, requires_grad=True) * 2
        >>> with DummyHook():
        ...     y = a * b
        ...
        On save tensor([1., 1., 1., 1., 1.], requires_grad=True)
        On save tensor([2., 2., 2., 2., 2.], grad_fn=<MulBackward0>)
        >>> y.sum().backward()
        On get (tensor([1., 1., 1., 1., 1.], requires_grad=True),)
        On get (tensor([2., 2., 2., 2., 2.], grad_fn=<MulBackward0>),)

    """

    def __init__(self) -> None:
        self.inside_context = False

    def __enter__(self):
        global CPUOffloadEnabled
        CPUOffloadEnabled = True

        self.inside_context = True
        torch._C._autograd._push_saved_tensors_default_hooks(
            self.on_save_for_backward, self.on_get_saved_tensor
        )

    def __exit__(self, *args: Any):
        global CPUOffloadEnabled
        CPUOffloadEnabled = False

        self.inside_context = False
        torch._C._autograd._pop_saved_tensors_default_hooks()

    def on_save_for_backward(self, tensor: torch.Tensor) -> Any:
        """On save for backward."""
        raise NotImplementedError(
            "`on_save_for_backward: Callable[[torch.Tensor], Any]`"
            "is not implemented in CpuOffloadHook class. Inherit "
            "this class and implement your custom hooks"
        )

    def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor:
        """On get saved tensor."""
        raise NotImplementedError(
            "`on_get_saved_tensor: Callable[[Any], torch.Tensor]`"
            "is not implemented in CpuOffloadHook class. Inherit "
            "this class and implement your custom hooks"
        )


class CpuOffloadHookWithOffloadHandler(CpuOffloadSavedTensorHook):
    """Context-manager that offloads/recovers tensors through an offload handler.

    The hook just offloads/recovers the tensor object to the handler through the
    `tensor_push` and `tensor_pop` interface. How the offload handler manages the
    offloading, recovering or prefetching timing is transparent to this hook.
    """

    def __init__(
        self,
        offload_handler: OffloadHandler,
        handler_extra_kwargs: Optional[Dict[str, Any]] = None,
        debug: bool = False,
    ) -> None:
        if handler_extra_kwargs is None:
            handler_extra_kwargs = {}
        self.debug: bool = debug
        self.offload_handler: OffloadHandler = offload_handler
        self.handler_extra_kwargs: Dict[str, Any] = handler_extra_kwargs
        super().__init__()

    def on_save_for_backward(self, tensor: torch.Tensor) -> Any:
        retrieve_identifier = self.offload_handler.tensor_push(tensor, **self.handler_extra_kwargs)
        return retrieve_identifier

    def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor:
        tensor = self.offload_handler.tensor_pop(saved_state, **self.handler_extra_kwargs)
        return tensor
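

# Illustrative pairing (sketch): any OffloadHandler subclass can be driven by this
# hook. Every tensor saved for backward inside the context goes through the
# handler's `tensor_push`, and is retrieved through `tensor_pop` during backward
# (`model_block` and `inp` are hypothetical):
#
#     handler = SynchronizedGroupOffloadHandler(num_offload_group=1)
#     with CpuOffloadHookWithOffloadHandler(offload_handler=handler):
#         loss = model_block(inp).sum()
#     loss.backward()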


class OffloadHandler:
    """A base class for CPU offload-handler."""

    def __init__(self) -> None:
        pass

    def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any:
        """Tensor push."""
        raise NotImplementedError(
            "`tensor_push` is not implemented in OffloadHandler class. "
            "Inherit this class and implement your custom tensor_push."
        )

    def tensor_pop(self, tensor_tag: Any, **kwargs):
        """Tensor pop."""
        raise NotImplementedError(
            "`tensor_pop` is not implemented in OffloadHandler class. "
            "Inherit this class and implement your custom tensor_pop."
        )


class GroupCommitFunction(torch.autograd.Function):
    """This is a dummy op with output identical to input.
    However, it is necessary for marking a timepoint for the offload handler to
    accomplish all synchronizations. Implementing it as a function is necessary
    because we need to take actions in both forward and backward.
    """

    @staticmethod
    def forward(ctx, tensor, cpu_offload_handler):
        # pylint: disable=missing-function-docstring
        cpu_offload_handler.on_group_commit_forward()
        ctx.cpu_offload_handler = cpu_offload_handler
        # return the identical tensor
        return tensor

    @staticmethod
    def backward(ctx, grad_output):
        # pylint: disable=missing-function-docstring
        cpu_offload_handler = ctx.cpu_offload_handler
        cpu_offload_handler.on_group_commit_backward()
        return grad_output, None


group_prefetch_offload_commit = GroupCommitFunction.apply
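
# Illustrative usage (sketch): the commit op is applied to a layer's output so the
# handler sees one "group commit" per layer in forward and, symmetrically, one in
# backward (`te_layer`, `inp` and `handler` are hypothetical):
#
#     out = te_layer(inp)
#     out = group_prefetch_offload_commit(out, handler)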


class SynchronizedGroupOffloadHandler(OffloadHandler):
    """Offload Handler that offloads/reloads in a synchronized way.
    The device-to-host and host-to-device copying happens in the same stream
    as the computation kernels, so the copying blocks computation.
    """

    def __init__(
        self, num_offload_group, tensor_need_offloading_checker=(lambda _: True), debug=False
    ) -> None:
        super().__init__()

        self.num_offload_group = num_offload_group
        self.tensor_need_offloading_checker = tensor_need_offloading_checker
        self.debug = debug

        self.groupid_reset()

    def groupid_reset(self):
        """Groupid reset."""
        # Data structures to label saved tensors and book-keep their CPU copies.
        # Currently, push creates a new CPU tensor and copies into it; pop copies
        # the tensor back to GPU and deletes the CPU copy.
        # The group/tensor counters below are updated whenever `group_commit()` is invoked.
        self.current_group, self.tensor_count_current_group = (0, 0)
        self.torch_tensor_count = 0
        self.tensor_tag_to_state = {}

    def on_group_commit_forward(self):
        """On group commit forward."""
        # finishing up with updating current group and tensor count
        self.current_group += 1  # increment
        self.tensor_count_current_group = 0  # reset

    def on_group_commit_backward(self):
        """On group commit backward."""
        self.current_group -= 1
        assert self.current_group >= 0

    @staticmethod
    def offload(src_tensor, pin_memory=True):
        """Offload."""

        cpu_backup = torch.empty(
            src_tensor.size(),
            dtype=src_tensor.dtype,
            layout=src_tensor.layout,
            device="cpu",
            pin_memory=pin_memory,
        )

        cpu_backup.copy_(src_tensor, non_blocking=pin_memory)
        state = (src_tensor.device, cpu_backup)
        return state

    @staticmethod
    def reload(state, non_blocking=None, copy_buffer=None):
        """Reload."""
        dev, cpu_backup = state
        if non_blocking is None:
            non_blocking = cpu_backup.is_pinned()

        if copy_buffer is None:
            return cpu_backup.to(dev, non_blocking=non_blocking)

        assert cpu_backup.size() == copy_buffer.size(), "Can't copy two buffers of different sizes!"

        copy_buffer.copy_(cpu_backup, non_blocking=non_blocking)

        return copy_buffer
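
    # Round-trip sketch (illustrative; `gpu_tensor` is hypothetical): `offload` returns
    # a `(device, cpu_copy)` pair and `reload` either allocates a fresh tensor on the
    # original device or fills a caller-provided `copy_buffer`:
    #
    #     state = SynchronizedGroupOffloadHandler.offload(gpu_tensor)
    #     gpu_tensor = SynchronizedGroupOffloadHandler.reload(state)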

    def tensor_push(self, tensor: torch.Tensor, **kwargs):
        """Tensor push."""
        # obtain a unique tensor tag
        tensor_tag = (self.current_group, self.tensor_count_current_group)
        self.tensor_count_current_group += 1
        assert tensor_tag not in self.tensor_tag_to_state
        if self.current_group < self.num_offload_group and self.tensor_need_offloading_checker(
            tensor
        ):
            state = SynchronizedGroupOffloadHandler.offload(tensor)
            self.tensor_tag_to_state[tensor_tag] = state
        else:
            # will be offloaded together after group commit
            self.tensor_tag_to_state[tensor_tag] = tensor
        return tensor_tag

    def tensor_pop(self, tensor_tag, **kwargs):
        """Tensor pop."""
        assert tensor_tag in self.tensor_tag_to_state
        state = self.tensor_tag_to_state.pop(tensor_tag)
        if isinstance(state, tuple):
            tensor = SynchronizedGroupOffloadHandler.reload(state)
        else:
            tensor = state
        return tensor


class AsyncDoubleBufferGroupOffloadHandler(SynchronizedGroupOffloadHandler):
    """Compared to the synchronized handler, this uses more memory because of the
    buffers, but achieves better performance due to overlapping. D2H and H2D copies
    are completely hidden behind computation if the computation time of a layer is
    longer than the host-device communication time. Bulk offloading with delay and
    bulk reloading with prefetch are implemented."""

    def __init__(
        self,
        num_offload_group,  # must be <= actual number of groups (number of commits)
        num_model_group,
        tensor_need_offloading_checker=(lambda t: True),
        double_buffering=False,
        debug=False,
    ) -> None:
        super().__init__(
            num_offload_group=num_offload_group,
            tensor_need_offloading_checker=tensor_need_offloading_checker,
            debug=debug,
        )
        # Number of layers in the model
        self.num_layers = num_model_group
        # Data structure to maintain references to activation tensors
        self.tensor_tag_to_buf = {}
        # Data structure to hold the FP8/MXFP8 tensor objects
        self.fp8_tensor_object_map = {}
        self.float8_transpose_cache_valid = {}
        self.dereferencing_list = []
        # Tracking the number of layers offloaded
        self.offloaded_group_count = 0
        # Core data structure that decides the window for offloading
        self.layer_window_map = {}

        # Data structures for double-buffered reloading
        self.double_buffering = double_buffering
        self.reload_double_buffer = [[], []]
        self.double_buffer_created = False

        # Logic to load-balance offloading across the forward computation
        # for optimal CPU/GPU interconnect usage
        constant = 0
        for i in range(self.num_offload_group):
            self.layer_window_map[i] = ((self.num_layers // self.num_offload_group) * (i + 1)) - 1
            if i < (self.num_layers % self.num_offload_group):
                self.layer_window_map[i] += i + 1
                constant = i + 1
            else:
                self.layer_window_map[i] += constant
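
        # Worked example (illustrative): with num_layers=8 and num_offload_group=3,
        # the loop above yields layer_window_map = {0: 2, 1: 5, 2: 7}, i.e. group 0's
        # offload must finish by layer 2, group 1's by layer 5 and group 2's by
        # layer 7, spreading D2H traffic roughly evenly across the forward pass.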

        # allocate streams and events for synchronization
        self.d2h_stream = torch.cuda.Stream()
        self.h2d_stream = torch.cuda.Stream()

    def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any:
        global CPUOffloadedLayer

        torch_stray_tensor = isinstance(
            tensor,
            (
                torch._subclasses.fake_tensor.FakeTensor,
                torch._subclasses.functional_tensor.FunctionalTensor,
            ),
        )

        is_quantized_tensor = isinstance(tensor, QuantizedTensorStorage)

        if not torch_stray_tensor:
            # obtain a unique tensor tag
            tensor_tag = (self.current_group, self.tensor_count_current_group)
            self.tensor_count_current_group += 1

            assert tensor_tag not in self.tensor_tag_to_state

            if is_quantized_tensor:
                tensor_list, _ = tensor.prepare_for_saving()

                self.tensor_tag_to_state[tensor_tag] = []
                self.tensor_tag_to_buf[tensor_tag] = []

                # Added support for de-duplicating FP8 param tensors
                for _, value in self.fp8_tensor_object_map.items():
                    if tensor is value:
                        self.dereferencing_list.append(tensor_tag)
                        break

                self.fp8_tensor_object_map[tensor_tag] = tensor
                if isinstance(tensor, Float8Tensor):
                    self.float8_transpose_cache_valid[tensor_tag] = getattr(
                        tensor, "_transpose_invalid"
                    )
            else:
                tensor_list = [tensor]

            for t in tensor_list:
                if is_quantized_tensor:
                    self.tensor_tag_to_state[tensor_tag].append(t)
                else:
                    self.tensor_tag_to_state[tensor_tag] = t

                if (
                    self.current_group < self.num_offload_group
                    and self.tensor_need_offloading_checker(t)
                ):
                    if is_quantized_tensor:
                        self.tensor_tag_to_buf[tensor_tag].append(t)
                        # Need to clear the internal data reference for the quantized tensors
                        tensor.clear()
                    else:
                        self.tensor_tag_to_buf[tensor_tag] = t

                    # Needed to differentiate the attention of non-offloaded layers:
                    # the QKV layout of a non-offloaded layer's attention needs
                    # to be modified while reloading
                    CPUOffloadedLayer = True
        else:
            tensor_tag = (-1, self.torch_tensor_count)
            self.torch_tensor_count += 1
            self.tensor_tag_to_state[tensor_tag] = tensor

        return tensor_tag

    def tensor_pop(self, tensor_tag, **kwargs):
        """Tensor pop."""
        global CPUOffloadedLayer

        assert tensor_tag in self.tensor_tag_to_state
        tensor = self.tensor_tag_to_state.pop(tensor_tag)

        # Handling the quantized tensor case specially here
        if isinstance(tensor, list):
            # If it's a duplicated tensor, we don't need to locally
            # write back a tensor as it would already be written
            if tensor_tag in self.dereferencing_list:
                self.dereferencing_list.remove(tensor_tag)
            else:
                self.fp8_tensor_object_map[tensor_tag].restore_from_saved(tensor)
            tensor = self.fp8_tensor_object_map.pop(tensor_tag)

        if self.double_buffering:
            tensor._do_not_clear = True

        self.tensor_tag_to_buf.pop(tensor_tag, None)
        # the tensor should have been copied back in on_group_commit_backward()
        # which invokes bulk_reload_group.
        assert not isinstance(tensor, tuple)
        return tensor

    def bulk_offload_group(self, group_to_offload):
        """Bulk offload group."""
        with torch.cuda.stream(self.d2h_stream):
            for tensor_tag, state in self.tensor_tag_to_state.items():
                group_id, _ = tensor_tag
                if group_id == group_to_offload:
                    assert not isinstance(state, tuple)

                    is_quantized_tensor = isinstance(state, list)

                    if is_quantized_tensor:
                        tensor_list = state
                        self.tensor_tag_to_state[tensor_tag] = []
                    else:
                        tensor_list = [state]

                    for tensor_on_device in tensor_list:
                        # `tensor_offloaded` is a hacky way of dealing with columnwise-only
                        # quantized tensors for CPU offloading. The complication is due to
                        # the `rowwise_data` being `None`. The offloading checker incorrectly
                        # returns `False` and the entire `state` ([None, columnwise_tensor])
                        # is added to the tensor tag state dict. A better design would change
                        # how quantized tensors are kept track of in the offload handler.
                        # Currently at every stage it is ensured that a quantized tensor is a
                        # list whereas a non-quantized tensor is a standalone object, which is
                        # not good! TODO(@sanandaraj5597)
                        tensor_offloaded = False
                        # if offloaded, store the reference to the CPU copy
                        if self.tensor_need_offloading_checker(tensor_on_device):
                            tensor_offloaded = True
                            state = SynchronizedGroupOffloadHandler.offload(tensor_on_device)
                        if is_quantized_tensor:
                            if tensor_offloaded:
                                self.tensor_tag_to_state[tensor_tag].append(state)
                            else:
                                self.tensor_tag_to_state[tensor_tag].append(tensor_on_device)
                        else:
                            self.tensor_tag_to_state[tensor_tag] = state

    def synchronize_on_group_commit_forward(self, current_group):
        """Synchronize on group commit forward."""
        global CPUOffloadedLayer

        # For the first group, kickstart the offload after we have
        # the first compute completion
        if current_group == 0:
            self.d2h_stream.wait_stream(torch.cuda.current_stream())

            if not self.double_buffer_created:
                # Creating the first copy of double buffer for tensors that are offloaded
                for tensor_tag, buf in self.tensor_tag_to_buf.items():
                    if isinstance(buf, list):
                        for b in buf:
                            self.reload_double_buffer[0].append(
                                torch.empty_like(b) if self.double_buffering else None
                            )
                    else:
                        self.reload_double_buffer[0].append(
                            torch.empty_like(buf) if self.double_buffering else None
                        )

            self.bulk_offload_group(current_group)

        # Window map data structure helps us synchronize based on number
        # of layers offloaded
        if self.layer_window_map[self.offloaded_group_count] == current_group:

            # Stream synchronization both ways
            self.d2h_stream.wait_stream(torch.cuda.current_stream())
            torch.cuda.current_stream().wait_stream(self.d2h_stream)

            # Time to free the activation memory after usage
            for tensor_tag, tensor_buf in self.tensor_tag_to_buf.items():
                if tensor_tag[0] == self.offloaded_group_count:
                    if hasattr(tensor_buf, "needs_force_clear"):
                        # Need to clear activation tensor - sometimes references persist in the code.
                        # This is the case for example with the Float8TensorStorage class,
                        # which is saved directly inside the ctx while its internal tensors are
                        # saved inside save_for_backward.
                        tensor_buf.data = torch.Tensor()
                    # Release the pointer to the tensor
                    self.tensor_tag_to_buf[tensor_tag] = None

            # Time to offload the next group
            if self.offloaded_group_count < (self.num_offload_group - 1):
                self.bulk_offload_group(self.offloaded_group_count + 1)

            # Increment the offload group count to keep track
            self.offloaded_group_count += 1

        if current_group == (self.num_offload_group - 1):
            CPUOffloadedLayer = False

        if not self.double_buffer_created:
            # Creating second copy of double buffer for tensors that are offloaded
            if current_group == (self.num_layers - 1):
                for buf in self.reload_double_buffer[0]:
                    self.reload_double_buffer[1].append(
                        torch.empty_like(buf) if self.double_buffering else None
                    )
                self.double_buffer_created = True

    def on_group_commit_forward(self):
        """This function will cause host device synchronization"""
        # handle synchronization events
        self.synchronize_on_group_commit_forward(self.current_group)

        super().on_group_commit_forward()

    def bulk_reload_group(self, group_to_reload):
        """Bulk reload group."""
        assert group_to_reload < self.num_offload_group

        buffer_idx = 0
        double_buffer_idx = group_to_reload % 2

        main_stream = torch.cuda.current_stream()

        with torch.cuda.stream(self.h2d_stream):
            # move back tensors
            for tensor_label, state in self.tensor_tag_to_state.items():
                group_id, _ = tensor_label
                if group_id == group_to_reload:

                    if isinstance(state, tuple):
                        if self.double_buffering:
                            reload_buffer = self.reload_double_buffer[double_buffer_idx][buffer_idx]
                        else:
                            with torch.cuda.stream(main_stream):
                                reload_buffer = torch.empty_like(
                                    state[1], device=torch.cuda.current_device()
                                )

                        recovered_tensor = SynchronizedGroupOffloadHandler.reload(
                            state, True, reload_buffer
                        )
                        buffer_idx = buffer_idx + 1
                        self.tensor_tag_to_state[tensor_label] = recovered_tensor
                    elif isinstance(state, list):
                        tensor_list = []
                        for state_tuple in state:

                            if isinstance(state_tuple, tuple):
                                if self.double_buffering:
                                    reload_buffer = self.reload_double_buffer[double_buffer_idx][
                                        buffer_idx
                                    ]
                                else:
                                    with torch.cuda.stream(main_stream):
                                        reload_buffer = torch.empty_like(
                                            state_tuple[1], device=torch.cuda.current_device()
                                        )

                                tensor_list.append(
                                    SynchronizedGroupOffloadHandler.reload(
                                        state_tuple,
                                        True,
                                        reload_buffer,
                                    )
                                )
                                buffer_idx = buffer_idx + 1
                            else:
                                tensor_list.append(state_tuple)

                        # No need to write back the duplicated tensor again
                        # to the same location, this check ensures that
                        if tensor_label in self.dereferencing_list:
                            self.dereferencing_list.remove(tensor_label)
                        else:
                            _ = self.fp8_tensor_object_map[tensor_label].restore_from_saved(
                                tensor_list
                            )

                        if isinstance(self.fp8_tensor_object_map[tensor_label], Float8Tensor):
                            self.fp8_tensor_object_map[tensor_label]._transpose_invalid = (
                                self.float8_transpose_cache_valid.pop(tensor_label)
                            )
                        self.tensor_tag_to_state[tensor_label] = self.fp8_tensor_object_map.pop(
                            tensor_label
                        )

    def on_group_commit_backward(self):
        # First decrement the current group.
        # Each commit in forward incremented the group counter and each commit in
        # backward decrements it, so it should finally reach 0.
        self.current_group -= 1
        assert self.current_group >= 0

        # Layer window data structure helps us to reload at the right times
        if self.layer_window_map[self.offloaded_group_count - 1] == self.current_group:

            # Stream synchronization both ways
            self.h2d_stream.wait_stream(torch.cuda.current_stream())
            torch.cuda.current_stream().wait_stream(self.h2d_stream)

            # Time to reload the next group
            self.bulk_reload_group(self.offloaded_group_count - 1)

            # Decrease the offloading group counter
            self.offloaded_group_count -= 1 if self.offloaded_group_count > 1 else 0

        # Last group computation needs to wait till all the reloads complete
        if self.current_group == 0:
            torch.cuda.current_stream().wait_stream(self.h2d_stream)
            self.offloaded_group_count = 0


def get_cpu_offload_context(
    enabled: bool = False,
    num_layers: int = 1,
    model_layers: int = 1,
    offload_activations: bool = True,
    offload_weights: bool = False,
    double_buffering: bool = False,
):
    """
    This function returns the CPU Offload context and the synchronizer function that needs to be
    used after every transformer layer. Returns `nullcontext()` if offloading is not enabled.

    Usage:

    .. code-block:: python

        cpu_offload_context, cpu_offload_synchronizer = get_cpu_offload_context(enabled=True)

        with cpu_offload_context:
            out = te_layer.forward(inp_tensor)
        out = cpu_offload_synchronizer(out)
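
    For a stack of layers the same pair is typically reused, applying the synchronizer
    to each layer's output (illustrative sketch; ``layers`` and ``hidden`` are
    hypothetical names):

    .. code-block:: python

        ctx, sync = get_cpu_offload_context(
            enabled=True, num_layers=2, model_layers=4
        )

        for layer in layers:
            with ctx:
                hidden = layer(hidden)
            hidden = sync(hidden)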

    Parameters
    ----------
    enabled: bool, default = `False`
             When set to True, CPU Offloading functionality is enabled.
    num_layers: int, default = 1
                Determines the number of transformer layers
                you want to offload activations/weights for.
    model_layers: int, default = 1
                  Number of layers in the model that will be used under this context.
    offload_activations: bool, default = `True`
                         When set to `True`, offloads the activations for the TE layer.
    offload_weights: bool, default = `False`
                     Deprecated and has no effect; passing `True` only emits a
                     deprecation warning.
    double_buffering: bool, default = `False`
                      When set to `True`, uses double buffering for offloading.

    """

    if not offload_weights and not offload_activations:
        raise ValueError(
            "CPU offloading is enabled, but neither activations nor weights "
            "were selected for offloading."
        )

    if offload_weights:
        import warnings

        warnings.warn(
            "Offloading weights is deprecated. Using offload_weights=True does not have any"
            " effect.",
            DeprecationWarning,
        )

        # Weights offloading is deprecated but we maintain backward compatibility by doing nothing.
        if not offload_activations:
            return nullcontext(), lambda x: x

    def tensor_need_offloading_checker_activations(tensor):
        return hasattr(tensor, "activation_offloading")

    tensor_need_offloading_checker = tensor_need_offloading_checker_activations

    cpu_offload_handler = AsyncDoubleBufferGroupOffloadHandler(
        num_offload_group=num_layers,
        num_model_group=model_layers,
        tensor_need_offloading_checker=tensor_need_offloading_checker,
        double_buffering=double_buffering,
    )

    def group_prefetch_offload_commit_async(tensor):
        return group_prefetch_offload_commit(tensor, cpu_offload_handler)

    if enabled:
        return (
            CpuOffloadHookWithOffloadHandler(offload_handler=cpu_offload_handler),
            group_prefetch_offload_commit_async,
        )
    return nullcontext(), group_prefetch_offload_commit_async