import re
import warnings
from collections import OrderedDict
from copy import deepcopy
from itertools import chain
from typing import Dict, Callable, List, Union, Optional, Tuple

import torch
from torch import fx
from torch import nn
from torch.fx.graph_module import _copy_attr


__all__ = ["create_feature_extractor", "get_graph_node_names"]


class LeafModuleAwareTracer(fx.Tracer):
    """
    An fx.Tracer that allows the user to specify a set of leaf modules, i.e.
    modules that are not to be traced through. The resulting graph ends up
    having single nodes referencing calls to the leaf modules' forward methods.
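
    A minimal sketch of the intended usage (``MyLeaf`` stands in for any
    module you want kept opaque)::

        >>> tracer = LeafModuleAwareTracer(leaf_modules=[MyLeaf])
        >>> graph = tracer.trace(model)  # MyLeaf calls become single call_module nodes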
    """

    def __init__(self, *args, **kwargs):
        self.leaf_modules = {}
        if "leaf_modules" in kwargs:
            leaf_modules = kwargs.pop("leaf_modules")
            self.leaf_modules = leaf_modules
        super().__init__(*args, **kwargs)

    def is_leaf_module(self, m: nn.Module, module_qualname: str) -> bool:
        if isinstance(m, tuple(self.leaf_modules)):
            return True
        return super().is_leaf_module(m, module_qualname)


class NodePathTracer(LeafModuleAwareTracer):
    """
    NodePathTracer is an FX tracer that, for each operation, also records the
    name of the Node from which the operation originated. A node name here is
    a `.` separated path walking the hierarchy from top level module down to
    leaf operation or leaf module. The name of the top level module is not
    included as part of the node name. For example, if we trace a module whose
    forward method applies a ReLU module, the name for that node will simply
    be 'relu'.

    Some notes on the specifics:
        - Nodes are recorded to `self.node_to_qualname`, which is a dictionary
          mapping a given Node object to its node name.
        - Nodes are recorded in the order in which they are executed during
          tracing.
        - When a duplicate node name is encountered, a suffix of the form
          _{int} is added. The counter starts from 1.
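
    A minimal sketch, assuming ``model`` applies the same ReLU module twice
    inside a ``layer1`` submodule::

        >>> tracer = NodePathTracer()
        >>> tracer.trace(model)
        >>> print(list(tracer.node_to_qualname.values()))
        >>>     # e.g. [..., 'layer1.relu', 'layer1.relu_1', ...]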
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Track the qualified name of the Node being traced
        self.current_module_qualname = ""
        # A map from FX Node to the qualified name
        # NOTE: This is loosely like the "qualified name" mentioned in the
        # torch.fx docs https://pytorch.org/docs/stable/fx.html but adapted
        # for the purposes of the torchvision feature extractor
        self.node_to_qualname = OrderedDict()

    def call_module(self, m: torch.nn.Module, forward: Callable, args, kwargs):
        """
        Override of `fx.Tracer.call_module`
        This override:
        1) Stores away the qualified name of the caller for restoration later
        2) Adds the qualified name of the caller to
           `current_module_qualname` for retrieval by `create_proxy`
        3) Once a leaf module is reached, calls `create_proxy`
        4) Restores the caller's qualified name into `current_module_qualname`
        """
        old_qualname = self.current_module_qualname
        try:
            module_qualname = self.path_of_module(m)
            self.current_module_qualname = module_qualname
            if not self.is_leaf_module(m, module_qualname):
                out = forward(*args, **kwargs)
                return out
            return self.create_proxy("call_module", module_qualname, args, kwargs)
        finally:
            self.current_module_qualname = old_qualname

    def create_proxy(
        self, kind: str, target: fx.node.Target, args, kwargs, name=None, type_expr=None, *_
    ) -> fx.proxy.Proxy:
        """
        Override of `Tracer.create_proxy`. This override intercepts the recording
        of every operation and stores away the current traced module's qualified
        name in `node_to_qualname`
        """
        proxy = super().create_proxy(kind, target, args, kwargs, name, type_expr)
        self.node_to_qualname[proxy.node] = self._get_node_qualname(self.current_module_qualname, proxy.node)
        return proxy

    def _get_node_qualname(self, module_qualname: str, node: fx.node.Node) -> str:
        node_qualname = module_qualname

        if node.op != "call_module":
            # In this case module_qualname from torch.fx doesn't go all the
            # way to the leaf function/op so we need to append it
            if len(node_qualname) > 0:
                # Only append '.' if we are deeper than the top level module
                node_qualname += "."
            node_qualname += str(node)

        # Now we need to add an _{index} postfix on any repeated node names.
        # For modules we do this from scratch.
        # For anything else, torch.fx already adds a globally scoped _{index}
        # postfix, but we want it scoped locally (relative to the direct
        # parent). So first we need to undo the torch.fx postfix.
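        # e.g. torch.fx may have named a repeated global `add` as `add_1`;
        # strip that trailing index here and re-add a parent-scoped one below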
        if re.match(r".+_[0-9]+$", node_qualname) is not None:
            node_qualname = node_qualname.rsplit("_", 1)[0]

        # ... and now we add on our own postfix
        for existing_qualname in reversed(self.node_to_qualname.values()):
            # Check to see if existing_qualname is of the form
            # {node_qualname} or {node_qualname}_{int}
            if re.match(rf"{node_qualname}(_[0-9]+)?$", existing_qualname) is not None:
                postfix = existing_qualname.replace(node_qualname, "")
                if len(postfix):
                    # existing_qualname is of the form {node_qualname}_{int}
                    next_index = int(postfix[1:]) + 1
                else:
                    # existing_qualname is of the form {node_qualname}
                    next_index = 1
                node_qualname += f"_{next_index}"
                break

        return node_qualname


def _is_subseq(x, y):
    """Check if y is a subsequence of x
    https://stackoverflow.com/a/24017747/4391249
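
    Example::

        >>> _is_subseq([1, 2, 3, 4], [2, 4])
        True
        >>> _is_subseq([1, 2, 3, 4], [4, 2])
        False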
    """
    iter_x = iter(x)
    return all(any(x_item == y_item for x_item in iter_x) for y_item in y)


def _warn_graph_differences(train_tracer: NodePathTracer, eval_tracer: NodePathTracer):
    """
    Utility function for warning the user if there are differences between
    the train graph nodes and the eval graph nodes.
    """
    train_nodes = list(train_tracer.node_to_qualname.values())
    eval_nodes = list(eval_tracer.node_to_qualname.values())

    if len(train_nodes) == len(eval_nodes) and all(t == e for t, e in zip(train_nodes, eval_nodes)):
        return

    suggestion_msg = (
        "When choosing nodes for feature extraction, you may need to specify "
        "output nodes for train and eval mode separately."
    )

    if _is_subseq(train_nodes, eval_nodes):
        msg = (
            "NOTE: The nodes obtained by tracing the model in eval mode "
            "are a subsequence of those obtained in train mode. "
        )
    elif _is_subseq(eval_nodes, train_nodes):
        msg = (
            "NOTE: The nodes obtained by tracing the model in train mode "
            "are a subsequence of those obtained in eval mode. "
        )
    else:
        msg = "The nodes obtained by tracing the model in train mode are different from those obtained in eval mode. "
    warnings.warn(msg + suggestion_msg)


def get_graph_node_names(
    model: nn.Module, tracer_kwargs: Dict = {}, suppress_diff_warning: bool = False
) -> Tuple[List[str], List[str]]:
    """
    Dev utility to return node names in order of execution. See note on node
    names under :func:`create_feature_extractor`. Useful for seeing which node
    names are available for feature extraction. There are two reasons that
    node names can't easily be read directly from the code for a model:

        1. Not all submodules are traced through. Modules from ``torch.nn`` all
           fall within this category.
        2. Nodes representing the repeated application of the same operation
           or leaf module get a ``_{counter}`` postfix.

    The model is traced twice: once in train mode, and once in eval mode. Both
    sets of node names are returned.

    For more details on the node naming conventions used here, please see the
    :ref:`relevant subheading <about-node-names>` in the
    `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.

    Args:
        model (nn.Module): model for which we'd like to print node names
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (they are eventually passed onto
            `torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
        suppress_diff_warning (bool, optional): whether to suppress a warning
            when there are discrepancies between the train and eval version of
            the graph. Defaults to False.

    Returns:
        tuple(list, list): a list of node names from tracing the model in
        train mode, and another from tracing the model in eval mode.

    Examples::

        >>> model = torchvision.models.resnet18()
        >>> train_nodes, eval_nodes = get_graph_node_names(model)
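        >>> # node names follow module paths; e.g. train_nodes might begin
        >>> # ['x', 'conv1', 'bn1', 'relu', 'maxpool', 'layer1.0.conv1', ...]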
    """
    is_training = model.training
    train_tracer = NodePathTracer(**tracer_kwargs)
    train_tracer.trace(model.train())
    eval_tracer = NodePathTracer(**tracer_kwargs)
    eval_tracer.trace(model.eval())
    train_nodes = list(train_tracer.node_to_qualname.values())
    eval_nodes = list(eval_tracer.node_to_qualname.values())
    if not suppress_diff_warning:
        _warn_graph_differences(train_tracer, eval_tracer)
    # Restore training state
    model.train(is_training)
    return train_nodes, eval_nodes


class DualGraphModule(fx.GraphModule):
    """
    A derivative of `fx.GraphModule`. Differs in the following ways:
    - Requires a train and eval version of the underlying graph
    - Copies submodules according to the nodes of both train and eval graphs.
    - Calling train(mode) switches between train graph and eval graph.
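
    A minimal sketch, assuming ``train_graph`` and ``eval_graph`` were traced
    from the same ``model``::

        >>> gm = DualGraphModule(model, train_graph, eval_graph)
        >>> gm.eval()   # forward now runs eval_graph
        >>> gm.train()  # forward now runs train_graph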
    """

    def __init__(
        self, root: torch.nn.Module, train_graph: fx.Graph, eval_graph: fx.Graph, class_name: str = "GraphModule"
    ):
        """
        Args:
            root (nn.Module): module from which the copied module hierarchy is
                built
            train_graph (fx.Graph): the graph that should be used in train mode
            eval_graph (fx.Graph): the graph that should be used in eval mode
        """
        super(fx.GraphModule, self).__init__()

        self.__class__.__name__ = class_name

        self.train_graph = train_graph
        self.eval_graph = eval_graph

        # Copy all get_attr and call_module ops (indicated by BOTH train and
        # eval graphs)
        for node in chain(iter(train_graph.nodes), iter(eval_graph.nodes)):
            if node.op in ["get_attr", "call_module"]:
                assert isinstance(node.target, str)
                _copy_attr(root, self, node.target)

        # train mode by default
        self.train()
        self.graph = train_graph

        # (borrowed from fx.GraphModule):
        # Store the Tracer class responsible for creating a Graph separately as part of the
        # GraphModule state, except when the Tracer is defined in a local namespace.
        # Locally defined Tracers are not pickleable. This is needed because torch.package will
        # serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
        # to re-create the Graph during deserialization.
        assert (
            self.eval_graph._tracer_cls == self.train_graph._tracer_cls
        ), "Train mode and eval mode should use the same tracer class"
        self._tracer_cls = None
        if self.graph._tracer_cls and "<locals>" not in self.graph._tracer_cls.__qualname__:
            self._tracer_cls = self.graph._tracer_cls

    def train(self, mode=True):
        """
        Swap out the graph depending on the selected training mode.
        NOTE this should be safe when calling model.eval() because that just
        calls this with mode == False.
        """
        # NOTE: Only set self.graph if the current graph is not the desired
        # one. This saves us from recompiling the graph where not necessary.
        if mode and not self.training:
            self.graph = self.train_graph
        elif not mode and self.training:
            self.graph = self.eval_graph
        return super().train(mode=mode)


def create_feature_extractor(
    model: nn.Module,
    return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    train_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    eval_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    tracer_kwargs: Dict = {},
    suppress_diff_warning: bool = False,
) -> fx.GraphModule:
    """
    Creates a new graph module that returns intermediate nodes from a given
    model as a dictionary with user-specified keys as strings, and the requested
    outputs as values. This is achieved by re-writing the computation graph of
    the model via FX to return the desired nodes as outputs. All unused nodes
    are removed, together with their corresponding parameters.

    Desired output nodes must be specified as a ``.`` separated
    path walking the module hierarchy from top level module down to leaf
    operation or leaf module. For more details on the node naming conventions
    used here, please see the :ref:`relevant subheading <about-node-names>`
    in the `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.

    Not all models will be FX traceable, although with some massaging they can
    be made to cooperate. Here's a (not exhaustive) list of tips:

        - If you don't need to trace through a particular, problematic
          sub-module, turn it into a "leaf module" by passing a list of
          ``leaf_modules`` as one of the ``tracer_kwargs`` (see example below).
          It will not be traced through, but rather, the resulting graph will
          hold a reference to that module's forward method.
        - Likewise, you may turn functions into leaf functions by passing a
          list of ``autowrap_functions`` as one of the ``tracer_kwargs`` (see
          example below).
        - Some inbuilt Python functions can be problematic. For instance,
          ``int`` will raise an error during tracing. You may wrap them in your
          own function and then pass that in ``autowrap_functions`` as one of
          the ``tracer_kwargs``.

    For further information on FX see the
    `torch.fx documentation <https://pytorch.org/docs/stable/fx.html>`_.

    Args:
        model (nn.Module): model on which we will extract the features
        return_nodes (list or dict, optional): either a ``List`` or a ``Dict``
            containing the names (or partial names - see note above)
            of the nodes for which the activations will be returned. If it is
            a ``Dict``, the keys are the node names, and the values
            are the user-specified keys for the graph module's returned
            dictionary. If it is a ``List``, it is treated as a ``Dict`` mapping
            node specification strings directly to output names. In the case
            that ``train_return_nodes`` and ``eval_return_nodes`` are specified,
            this should not be specified.
        train_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different from those in eval mode.
            If this is specified, ``eval_return_nodes`` must also be specified,
            and ``return_nodes`` should not be specified.
        eval_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different from those in eval mode.
            If this is specified, ``train_return_nodes`` must also be specified,
            and ``return_nodes`` should not be specified.
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (which passes them onto its parent class
            `torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
        suppress_diff_warning (bool, optional): whether to suppress a warning
            when there are discrepancies between the train and eval version of
            the graph. Defaults to False.

    Examples::

        >>> # Feature extraction with resnet
        >>> model = torchvision.models.resnet18()
        >>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
        >>> model = create_feature_extractor(
        >>>     model, {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = model(torch.rand(1, 3, 224, 224))
        >>> print([(k, v.shape) for k, v in out.items()])
        >>>     [('feat1', torch.Size([1, 64, 56, 56])),
        >>>      ('feat2', torch.Size([1, 256, 14, 14]))]

        >>> # Specifying leaf modules and leaf functions
        >>> def leaf_function(x):
        >>>     # This would raise a TypeError if traced through
        >>>     return int(x)
        >>>
        >>> class LeafModule(torch.nn.Module):
        >>>     def forward(self, x):
        >>>         # This would raise a TypeError if traced through
        >>>         int(x.shape[0])
        >>>         return torch.nn.functional.relu(x + 4)
        >>>
        >>> class MyModule(torch.nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.conv = torch.nn.Conv2d(3, 1, 3)
        >>>         self.leaf_module = LeafModule()
        >>>
        >>>     def forward(self, x):
        >>>         leaf_function(x.shape[0])
        >>>         x = self.conv(x)
        >>>         return self.leaf_module(x)
        >>>
        >>> model = create_feature_extractor(
        >>>     MyModule(), return_nodes=['leaf_module'],
        >>>     tracer_kwargs={'leaf_modules': [LeafModule],
        >>>                    'autowrap_functions': [leaf_function]})

    """
    is_training = model.training

    assert any(
        arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes]
    ), "Either `return_nodes` or both `train_return_nodes` and `eval_return_nodes` should be specified"

    assert not (
        (train_return_nodes is None) ^ (eval_return_nodes is None)
    ), "If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"

    assert (return_nodes is None) ^ (
        train_return_nodes is None
    ), "Exactly one of `return_nodes` or the pair `train_return_nodes`/`eval_return_nodes` should be specified"

    # Put *_return_nodes into Dict[str, str] format
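    # e.g. ["layer1", "layer4"] -> {"layer1": "layer1", "layer4": "layer4"}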
    def to_strdict(n) -> Dict[str, str]:
        if isinstance(n, list):
            return {str(i): str(i) for i in n}
        return {str(k): str(v) for k, v in n.items()}

    if train_return_nodes is None:
        return_nodes = to_strdict(return_nodes)
        train_return_nodes = deepcopy(return_nodes)
        eval_return_nodes = deepcopy(return_nodes)
    else:
        train_return_nodes = to_strdict(train_return_nodes)
        eval_return_nodes = to_strdict(eval_return_nodes)

    # Repeat the tracing and graph rewriting for train and eval mode
    tracers = {}
    graphs = {}
    mode_return_nodes: Dict[str, Dict[str, str]] = {"train": train_return_nodes, "eval": eval_return_nodes}
    for mode in ["train", "eval"]:
        if mode == "train":
            model.train()
        elif mode == "eval":
            model.eval()

        # Instantiate our NodePathTracer and use that to trace the model
        tracer = NodePathTracer(**tracer_kwargs)
        graph = tracer.trace(model)

        name = model.__class__.__name__ if isinstance(model, nn.Module) else model.__name__
        graph_module = fx.GraphModule(tracer.root, graph, name)

        available_nodes = list(tracer.node_to_qualname.values())
        # FIXME We don't know if we should expect this to happen
        assert len(set(available_nodes)) == len(
            available_nodes
        ), "There are duplicate nodes! Please raise an issue https://github.com/pytorch/vision/issues"
        # Check that all outputs in return_nodes are present in the model
        for query in mode_return_nodes[mode].keys():
            # To check whether a query is available, we check that at least
            # one of the available names matches it up to a `.` boundary
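            # e.g. query 'layer1' matches 'layer1' and 'layer1.0.conv1',
            # but not 'layer10'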
            if not any([re.match(rf"^{query}(\.|$)", n) is not None for n in available_nodes]):
                raise ValueError(
                    f"node: '{query}' is not present in model. Hint: use "
                    "`get_graph_node_names` to make sure the "
                    "`return_nodes` you specified are present. It may even "
                    "be that you need to specify `train_return_nodes` and "
                    "`eval_return_nodes` separately."
                )

        # Remove the existing output nodes from this mode's graph
        orig_output_nodes = []
        for n in reversed(graph_module.graph.nodes):
            if n.op == "output":
                orig_output_nodes.append(n)
        assert len(orig_output_nodes)
        for n in orig_output_nodes:
            graph_module.graph.erase_node(n)

        # Find nodes corresponding to return_nodes and make them into output_nodes
        nodes = [n for n in graph_module.graph.nodes]
        output_nodes = OrderedDict()
        for n in reversed(nodes):
            module_qualname = tracer.node_to_qualname.get(n)
            if module_qualname is None:
                # NOTE - Known cases where this happens:
                # - Node representing creation of a tensor constant - probably
                #   not interesting as a return node
                # - When packing outputs into a named tuple like in InceptionV3
                continue
            for query in mode_return_nodes[mode]:
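                # Compare at the query's depth: query 'layer1' matches any
                # qualname whose first component is 'layer1'; iterating in
                # reverse means we capture that module's last-executed node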
                depth = query.count(".")
                if ".".join(module_qualname.split(".")[: depth + 1]) == query:
                    output_nodes[mode_return_nodes[mode][query]] = n
                    mode_return_nodes[mode].pop(query)
                    break
        output_nodes = OrderedDict(reversed(list(output_nodes.items())))

        # And add them at the end of the graph
        with graph_module.graph.inserting_after(nodes[-1]):
            graph_module.graph.output(output_nodes)

        # Remove unused modules / parameters
        graph_module.graph.eliminate_dead_code()
        graph_module.recompile()

        # Keep track of the tracer and graph so we can choose the main one
        tracers[mode] = tracer
        graphs[mode] = graph

    # Warn user if there are any discrepancies between the graphs of the
    # train and eval modes
    if not suppress_diff_warning:
        _warn_graph_differences(tracers["train"], tracers["eval"])

    # Build the final graph module
    graph_module = DualGraphModule(model, graphs["train"], graphs["eval"], class_name=name)

    # Restore original training mode
    model.train(is_training)
    graph_module.train(is_training)

    return graph_module