# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""Sequential container for fusible operations."""

from __future__ import annotations
from collections.abc import Iterable, Iterator
from typing import Optional

import torch

from transformer_engine.pytorch.fp8 import FP8GlobalStateManager
from transformer_engine.pytorch.ops.op import FusibleOperation
from transformer_engine.pytorch.ops.fuser import OperationFuser


class Sequential(torch.nn.Module):
    """Sequential container for fusible operations

    This is a drop-in replacement for `torch.nn.Sequential`, with
    support for fusing `FusibleOperation`s.

    Parameters
    ----------
    *args: FusibleOperation or torch.nn.Module
        Neural network modules
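
    Examples
    --------
    A minimal sketch of intended usage (assuming fusible ops such as
    `Linear` and `GELU` are exposed by `transformer_engine.pytorch.ops`):

    >>> import transformer_engine.pytorch.ops as te_ops
    >>> model = Sequential(
    ...     te_ops.Linear(1024, 4096),
    ...     te_ops.GELU(),
    ...     te_ops.Linear(4096, 1024),
    ... )
    >>> y = model(torch.randn(16, 1024, device="cuda"))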

    """

    def __init__(
        self,
        *args: FusibleOperation | torch.nn.Module,
    ) -> None:
        super().__init__()

        # List of modules, with fusible operations grouped together
        self._module_groups: Optional[list[OperationFuser | torch.nn.Module]]
        self._module_groups = None

        # Global state of last iteration
        self._last_global_state = None

        # Add modules
        if len(args) == 1 and isinstance(args[0], dict):
            for key, module in args[0].items():
                self.add_module(key, module)
        else:
            for module in args:
                self.append(module)

    def add_module(self, name: str, module: Optional[torch.nn.Module]) -> None:
        # pylint: disable=missing-function-docstring
        self._module_groups = None
        super().add_module(name, module)

    def _get_keys_by_idx(self, idx: int | slice) -> list[str]:
        """Get module keys corresponding to indices"""
        if isinstance(idx, slice):
            return list(self._modules.keys())[idx]
        size = len(self._modules)
        if not -size <= idx < size:
            raise IndexError(f"Attempted to access index {idx}, but there are {size} entries")
        if idx < 0:
            idx += size
        for i, key in enumerate(self._modules.keys()):
            if i == idx:
                return [key]
        raise RuntimeError(f"Could not access index {idx}")

    def _next_key(self) -> str:
        """Key for a newly added module"""
        idx = 0
        for key in self._modules.keys():
            try:
                key_idx = int(key)
            except (ValueError, TypeError):
                pass
            else:
                idx = max(idx, key_idx + 1)
        return str(idx)

    def __getitem__(
        self,
        idx: slice | int,
    ) -> Sequential | torch.nn.Module:
        keys = self._get_keys_by_idx(idx)
        if isinstance(idx, slice):
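            # Slicing returns a new container that shares the
            # underlying modules, matching `torch.nn.Sequential`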
            out = Sequential()
            out.extend(self._modules[key] for key in keys)
            return out
        return self._modules[keys[0]]

    def __setitem__(self, idx: int, module: torch.nn.Module) -> None:
        self._module_groups = None
        key = self._get_keys_by_idx(idx)[0]
        self._modules[key] = module

    def __delitem__(self, idx: slice | int) -> None:
        self._module_groups = None
        for key in self._get_keys_by_idx(idx):
            del self._modules[key]

    def __len__(self) -> int:
        return len(self._modules)

    def __iter__(self) -> Iterator[torch.nn.Module]:
        return iter(self._modules.values())

    def append(self, module: torch.nn.Module) -> Sequential:
        """Add module at the end of the container"""
        self.add_module(self._next_key(), module)
        return self

    def extend(self, modules: Iterable[torch.nn.Module]) -> Sequential:
        """Add modules at the end of the container"""
        for module in modules:
            self.append(module)
        return self

    def insert(self, idx: int, module: torch.nn.Module) -> Sequential:
        """Add modules at a position in the container"""
        self._module_groups = None
        keys = self._get_keys_by_idx(slice(idx, None))
        keys.append(self._next_key())
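        # Shift modules at positions >= idx one slot toward the end,
        # reusing the existing keys, then put the new module at idx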
        for i in reversed(range(1, len(keys))):
            self._modules[keys[i]] = self._modules[keys[i - 1]]
        self._modules[keys[0]] = module
        return self

    def pop(self, idx: slice | int) -> torch.nn.Module:
        """Remove module at a position in the container"""
        out = self[idx]
        del self[idx]
        return out

    def __iadd__(self, modules: Iterable[torch.nn.Module]) -> Sequential:
        return self.extend(modules)

    def __add__(self, modules: Iterable[torch.nn.Module]) -> Sequential:
        out = Sequential()
        out.extend(self)
        out.extend(modules)
        return out

    @classmethod
    def _make_module_groups(
        cls,
        modules: Iterable[torch.nn.Module],
    ) -> list[OperationFuser | torch.nn.Module]:
        """Make list of modules, with fusible operations grouped together"""

        # Group fusible operations together
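        # e.g. (illustrative) [op1, op2, module, op3] becomes
        # [OperationFuser([op1, op2]), module, OperationFuser([op3])]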
        groups = []
        for module in modules:
            if isinstance(module, FusibleOperation):
                if not groups or not isinstance(groups[-1], list):
                    groups.append([])
                groups[-1].append(module)
            else:
                groups.append(module)
        for idx, group in enumerate(groups):
            if isinstance(group, list):
                groups[idx] = OperationFuser(group, fuse_ops=True)

        # Check if operations expect extra input or output tensors
        # Note: If any op has extra inputs or outputs, then the entire
        # Sequential must be made up of TE ops.
        if len(groups) > 1:
            ops = []
            for group in groups:
                if isinstance(group, OperationFuser):
                    ops.extend(group._basic_ops)
            num_extra_inputs = sum(op.num_extra_inputs for op in ops)
            num_extra_outputs = sum(op.num_extra_outputs for op in ops)
            if num_extra_inputs > 0 or num_extra_outputs > 0:
                raise RuntimeError(
                    f"`Sequential` expects {num_extra_inputs} extra inputs "
                    f"and {num_extra_outputs} extra outputs, "
                    "but it contains non-fusible operations"
                )

        return groups

    def forward(
        self,
        input: torch.Tensor,  # pylint: disable=redefined-builtin
        *extra_inputs: torch.Tensor,
    ) -> torch.Tensor | tuple[torch.Tensor, ...]:
        """Forward pass"""

        # Get current global state
        fp8_enabled = FP8GlobalStateManager.is_fp8_enabled()
        fp8_recipe = FP8GlobalStateManager.get_fp8_recipe() if fp8_enabled else None
        global_state = (fp8_enabled, type(fp8_recipe))
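        # Note: operation fusion may depend on whether FP8 is enabled
        # and on the recipe type, so cached module groups are only
        # valid while this state is unchanged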

        # Reset module groups if the global state changed
        if self._last_global_state != global_state:
            self._module_groups = None
            self._last_global_state = global_state

        # Create module groups if needed
        if self._module_groups is None:
            self._module_groups = self._make_module_groups(self._modules.values())

        # Forward pass for each module group
        x = input
        for module_group in self._module_groups:
            x = module_group(x, *extra_inputs)
        return x