import math
import pathlib
import warnings
from types import FunctionType
from typing import Any, BinaryIO, List, Optional, Tuple, Union

import numpy as np
import torch
from PIL import Image, ImageColor, ImageDraw, ImageFont

__all__ = [
    "make_grid",
    "save_image",
    "draw_bounding_boxes",
    "draw_segmentation_masks",
    "draw_keypoints",
    "flow_to_image",
]


@torch.no_grad()
def make_grid(
    tensor: Union[torch.Tensor, List[torch.Tensor]],
    nrow: int = 8,
    padding: int = 2,
    normalize: bool = False,
    value_range: Optional[Tuple[int, int]] = None,
    scale_each: bool = False,
    pad_value: float = 0.0,
    **kwargs,
) -> torch.Tensor:
    """
    Make a grid of images.

    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
        padding (int, optional): amount of padding. Default: ``2``.
        normalize (bool, optional): If True, shift the image to the range (0, 1),
            by the min and max values specified by ``value_range``. Default: ``False``.
        value_range (tuple, optional): tuple (min, max) where min and max are numbers,
            then these numbers are used to normalize the image. By default, min and max
            are computed from the tensor.
        range (tuple, optional):
            .. warning::
                This parameter was deprecated in ``0.12`` and will be removed in ``0.14``. Please use ``value_range``
                instead.
        scale_each (bool, optional): If ``True``, scale each image in the batch of
            images separately rather than the (min, max) over all images. Default: ``False``.
        pad_value (float, optional): Value for the padded pixels. Default: ``0``.

    Returns:
        grid (Tensor): the tensor containing grid of images.
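
    Example (a minimal sketch; the input below is random data, not a real dataset):
        >>> imgs = torch.rand(16, 3, 64, 64)  # hypothetical mini-batch of 16 RGB images
        >>> grid = make_grid(imgs, nrow=4, padding=2)
        >>> grid.shape
        torch.Size([3, 266, 266])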
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(make_grid)
    if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}")

    if "range" in kwargs.keys():
        warnings.warn(
            "The parameter 'range' is deprecated since 0.12 and will be removed in 0.14. "
            "Please use 'value_range' instead."
        )
        value_range = kwargs["range"]

    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = torch.stack(tensor, dim=0)

    if tensor.dim() == 2:  # single image H x W
        tensor = tensor.unsqueeze(0)
    if tensor.dim() == 3:  # single image
        if tensor.size(0) == 1:  # if single-channel, convert to 3-channel
            tensor = torch.cat((tensor, tensor, tensor), 0)
        tensor = tensor.unsqueeze(0)

    if tensor.dim() == 4 and tensor.size(1) == 1:  # single-channel images
        tensor = torch.cat((tensor, tensor, tensor), 1)

    if normalize is True:
        tensor = tensor.clone()  # avoid modifying tensor in-place
        if value_range is not None and not isinstance(value_range, tuple):
            raise TypeError("value_range has to be a tuple (min, max) if specified. min and max are numbers")

        def norm_ip(img, low, high):
            img.clamp_(min=low, max=high)
            img.sub_(low).div_(max(high - low, 1e-5))

        def norm_range(t, value_range):
            if value_range is not None:
                norm_ip(t, value_range[0], value_range[1])
            else:
                norm_ip(t, float(t.min()), float(t.max()))

        if scale_each is True:
            for t in tensor:  # loop over mini-batch dimension
                norm_range(t, value_range)
        else:
            norm_range(tensor, value_range)

    if not isinstance(tensor, torch.Tensor):
        raise TypeError("tensor should be of type torch.Tensor")
    if tensor.size(0) == 1:
        return tensor.squeeze(0)

    # make the mini-batch of images into a grid
    nmaps = tensor.size(0)
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
    num_channels = tensor.size(1)
    grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            # Tensor.copy_() is a valid method but seems to be missing from the stubs
            # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_
            grid.narrow(1, y * height + padding, height - padding).narrow(  # type: ignore[attr-defined]
                2, x * width + padding, width - padding
            ).copy_(tensor[k])
            k = k + 1
    return grid


@torch.no_grad()
def save_image(
    tensor: Union[torch.Tensor, List[torch.Tensor]],
    fp: Union[str, pathlib.Path, BinaryIO],
    format: Optional[str] = None,
    **kwargs,
) -> None:
    """
    Save a given Tensor into an image file.

    Args:
        tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
            saves the tensor as a grid of images by calling ``make_grid``.
        fp (string or file object): A filename or a file object
        format (Optional): If omitted, the format to use is determined from the filename extension.
            If a file object was used instead of a filename, this parameter should always be used.
        **kwargs: Other arguments are documented in ``make_grid``.
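
    Example (a minimal sketch; the tensor and file name below are hypothetical):
        >>> imgs = torch.rand(8, 3, 32, 32)  # values expected in the [0, 1] range
        >>> save_image(imgs, "samples.png", nrow=4)  # writes a 2 x 4 grid to disk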
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(save_image)
    grid = make_grid(tensor, **kwargs)
    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
    ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    im = Image.fromarray(ndarr)
    im.save(fp, format=format)


@torch.no_grad()
def draw_bounding_boxes(
    image: torch.Tensor,
    boxes: torch.Tensor,
    labels: Optional[List[str]] = None,
    colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,
    fill: Optional[bool] = False,
    width: int = 1,
    font: Optional[str] = None,
    font_size: Optional[int] = None,
) -> torch.Tensor:

    """
    Draws bounding boxes on given image.
    The values of the input image should be uint8 between 0 and 255.
    If fill is True, the resulting Tensor should be saved as a PNG image.

    Args:
        image (Tensor): Tensor of shape (C x H x W) and dtype uint8.
        boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that
            the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and
            `0 <= ymin < ymax < H`.
        labels (List[str]): List containing the labels of bounding boxes.
        colors (color or list of colors, optional): List containing the colors
            of the boxes or single color for all boxes. The color can be represented as
            PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``.
            By default, random colors are generated for boxes.
        fill (bool): If ``True``, fills the bounding box with the specified color.
        width (int): Width of bounding box.
        font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
            also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
            `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
        font_size (int): The requested font size in points.

    Returns:
        img (Tensor[C, H, W]): Image Tensor of dtype uint8 with bounding boxes plotted.
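
    Example (a minimal sketch; the blank image, boxes and labels below are made up):
        >>> img = torch.zeros(3, 100, 100, dtype=torch.uint8)
        >>> boxes = torch.tensor([[10, 10, 50, 50], [55, 20, 95, 90]], dtype=torch.float)
        >>> out = draw_bounding_boxes(img, boxes, labels=["cat", "dog"], colors=["red", "#00FF00"], width=2)
        >>> out.shape
        torch.Size([3, 100, 100])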
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(draw_bounding_boxes)
    if not isinstance(image, torch.Tensor):
        raise TypeError(f"Tensor expected, got {type(image)}")
    elif image.dtype != torch.uint8:
        raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
    elif image.dim() != 3:
        raise ValueError("Pass individual images, not batches")
    elif image.size(0) not in {1, 3}:
        raise ValueError("Only grayscale and RGB images are supported")

    num_boxes = boxes.shape[0]

    if labels is None:
        labels: Union[List[str], List[None]] = [None] * num_boxes  # type: ignore[no-redef]
    elif len(labels) != num_boxes:
        raise ValueError(
            f"Number of boxes ({num_boxes}) and labels ({len(labels)}) mismatch. Please specify labels for each box."
        )

    if colors is None:
        colors = _generate_color_palette(num_boxes)
    elif isinstance(colors, list):
        if len(colors) < num_boxes:
            raise ValueError(f"Number of colors ({len(colors)}) is less than number of boxes ({num_boxes}). ")
    else:  # colors specifies a single color for all boxes
        colors = [colors] * num_boxes

    colors = [(ImageColor.getrgb(color) if isinstance(color, str) else color) for color in colors]

    if font is None:
        if font_size is not None:
            warnings.warn("Argument 'font_size' will be ignored since 'font' is not set.")
        txt_font = ImageFont.load_default()
    else:
        txt_font = ImageFont.truetype(font=font, size=font_size or 10)

    # Handle Grayscale images
    if image.size(0) == 1:
        image = torch.tile(image, (3, 1, 1))

    ndarr = image.permute(1, 2, 0).cpu().numpy()
    img_to_draw = Image.fromarray(ndarr)
    img_boxes = boxes.to(torch.int64).tolist()

    if fill:
        draw = ImageDraw.Draw(img_to_draw, "RGBA")
    else:
        draw = ImageDraw.Draw(img_to_draw)

    for bbox, color, label in zip(img_boxes, colors, labels):  # type: ignore[arg-type]
        if fill:
            fill_color = color + (100,)
            draw.rectangle(bbox, width=width, outline=color, fill=fill_color)
        else:
            draw.rectangle(bbox, width=width, outline=color)

        if label is not None:
            margin = width + 1
            draw.text((bbox[0] + margin, bbox[1] + margin), label, fill=color, font=txt_font)

    return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)


@torch.no_grad()
def draw_segmentation_masks(
    image: torch.Tensor,
    masks: torch.Tensor,
    alpha: float = 0.8,
    colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,
) -> torch.Tensor:

    """
    Draws segmentation masks on given RGB image.
    The values of the input image should be uint8 between 0 and 255.

    Args:
        image (Tensor): Tensor of shape (3, H, W) and dtype uint8.
        masks (Tensor): Tensor of shape (num_masks, H, W) or (H, W) and dtype bool.
        alpha (float): Float number between 0 and 1 denoting the transparency of the masks.
            0 means full transparency, 1 means no transparency.
        colors (color or list of colors, optional): List containing the colors
            of the masks or single color for all masks. The color can be represented as
            PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``.
            By default, random colors are generated for each mask.

    Returns:
        img (Tensor[C, H, W]): Image Tensor, with segmentation masks drawn on top.
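
    Example (a minimal sketch; the blank image and square mask below are made up):
        >>> img = torch.zeros(3, 64, 64, dtype=torch.uint8)
        >>> mask = torch.zeros(64, 64, dtype=torch.bool)
        >>> mask[16:48, 16:48] = True
        >>> out = draw_segmentation_masks(img, mask, alpha=0.5, colors="blue")
        >>> out.shape
        torch.Size([3, 64, 64])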
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(draw_segmentation_masks)
    if not isinstance(image, torch.Tensor):
        raise TypeError(f"The image must be a tensor, got {type(image)}")
    elif image.dtype != torch.uint8:
        raise ValueError(f"The image dtype must be uint8, got {image.dtype}")
    elif image.dim() != 3:
        raise ValueError("Pass individual images, not batches")
    elif image.size()[0] != 3:
        raise ValueError("Pass an RGB image. Other Image formats are not supported")
    if masks.ndim == 2:
        masks = masks[None, :, :]
    if masks.ndim != 3:
        raise ValueError("masks must be of shape (H, W) or (batch_size, H, W)")
    if masks.dtype != torch.bool:
        raise ValueError(f"The masks must be of dtype bool. Got {masks.dtype}")
    if masks.shape[-2:] != image.shape[-2:]:
        raise ValueError("The image and the masks must have the same height and width")

    num_masks = masks.size()[0]
    if colors is not None and num_masks > len(colors):
        raise ValueError(f"There are more masks ({num_masks}) than colors ({len(colors)})")

    if colors is None:
        colors = _generate_color_palette(num_masks)

    if not isinstance(colors, list):
        colors = [colors]
    if not isinstance(colors[0], (tuple, str)):
        raise ValueError("colors must be a tuple or a string, or a list thereof")
    if isinstance(colors[0], tuple) and len(colors[0]) != 3:
        raise ValueError("It seems that you passed a tuple of colors instead of a list of colors")

    out_dtype = torch.uint8

    colors_ = []
    for color in colors:
        if isinstance(color, str):
            color = ImageColor.getrgb(color)
        colors_.append(torch.tensor(color, dtype=out_dtype))

    img_to_draw = image.detach().clone()
    # TODO: There might be a way to vectorize this
    for mask, color in zip(masks, colors_):
        img_to_draw[:, mask] = color[:, None]

    out = image * (1 - alpha) + img_to_draw * alpha
    return out.to(out_dtype)


@torch.no_grad()
def draw_keypoints(
    image: torch.Tensor,
    keypoints: torch.Tensor,
    connectivity: Optional[List[Tuple[int, int]]] = None,
    colors: Optional[Union[str, Tuple[int, int, int]]] = None,
    radius: int = 2,
    width: int = 3,
) -> torch.Tensor:

    """
    Draws Keypoints on given RGB image.
    The values of the input image should be uint8 between 0 and 255.

    Args:
        image (Tensor): Tensor of shape (3, H, W) and dtype uint8.
        keypoints (Tensor): Tensor of shape (num_instances, K, 2), giving the K keypoint locations for each of the
            num_instances instances, in the format [x, y].
        connectivity (List[Tuple[int, int]]): A list of tuples, where each tuple contains a pair of keypoint
            indices to be connected.
        colors (str, Tuple): The color can be represented as
            PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``.
        radius (int): Integer denoting radius of keypoint.
        width (int): Integer denoting width of line connecting keypoints.

    Returns:
        img (Tensor[C, H, W]): Image Tensor of dtype uint8 with keypoints drawn.
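
    Example (a minimal sketch; the blank image and keypoints below are made up):
        >>> img = torch.zeros(3, 64, 64, dtype=torch.uint8)
        >>> kpts = torch.tensor([[[10.0, 10.0], [30.0, 40.0], [50.0, 20.0]]])  # one instance, K=3
        >>> out = draw_keypoints(img, kpts, connectivity=[(0, 1), (1, 2)], colors="red", radius=3)
        >>> out.shape
        torch.Size([3, 64, 64])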
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(draw_keypoints)
    if not isinstance(image, torch.Tensor):
        raise TypeError(f"The image must be a tensor, got {type(image)}")
    elif image.dtype != torch.uint8:
        raise ValueError(f"The image dtype must be uint8, got {image.dtype}")
    elif image.dim() != 3:
        raise ValueError("Pass individual images, not batches")
    elif image.size()[0] != 3:
        raise ValueError("Pass an RGB image. Other Image formats are not supported")

    if keypoints.ndim != 3:
        raise ValueError("keypoints must be of shape (num_instances, K, 2)")

    ndarr = image.permute(1, 2, 0).cpu().numpy()
    img_to_draw = Image.fromarray(ndarr)
    draw = ImageDraw.Draw(img_to_draw)
    img_kpts = keypoints.to(torch.int64).tolist()

    for kpt_inst in img_kpts:
        for kpt in kpt_inst:
            x1 = kpt[0] - radius
            x2 = kpt[0] + radius
            y1 = kpt[1] - radius
            y2 = kpt[1] + radius
            draw.ellipse([x1, y1, x2, y2], fill=colors, outline=None, width=0)

        if connectivity:
            for connection in connectivity:
                start_pt_x = kpt_inst[connection[0]][0]
                start_pt_y = kpt_inst[connection[0]][1]

                end_pt_x = kpt_inst[connection[1]][0]
                end_pt_y = kpt_inst[connection[1]][1]

                draw.line(
                    ((start_pt_x, start_pt_y), (end_pt_x, end_pt_y)),
                    width=width,
                )

    return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)


# Flow visualization code adapted from https://github.com/tomrunia/OpticalFlow_Visualization
@torch.no_grad()
def flow_to_image(flow: torch.Tensor) -> torch.Tensor:

    """
    Converts a flow to an RGB image.

    Args:
        flow (Tensor): Flow of shape (N, 2, H, W) or (2, H, W) and dtype torch.float.

    Returns:
        img (Tensor): Image Tensor of dtype uint8 where each color corresponds
            to a given flow direction. Shape is (N, 3, H, W) or (3, H, W) depending on the input.
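
    Example (a minimal sketch with a random, hypothetical flow field):
        >>> flow = torch.randn(2, 32, 32)  # (2, H, W), dtype torch.float32
        >>> img = flow_to_image(flow)
        >>> img.shape, img.dtype
        (torch.Size([3, 32, 32]), torch.uint8)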
    """

    if flow.dtype != torch.float:
        raise ValueError(f"Flow should be of dtype torch.float, got {flow.dtype}.")

    orig_shape = flow.shape
    if flow.ndim == 3:
        flow = flow[None]  # Add batch dim

    if flow.ndim != 4 or flow.shape[1] != 2:
        raise ValueError(f"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.")

    max_norm = torch.sum(flow ** 2, dim=1).sqrt().max()
    epsilon = torch.finfo((flow).dtype).eps
    normalized_flow = flow / (max_norm + epsilon)
    img = _normalized_flow_to_image(normalized_flow)

    if len(orig_shape) == 3:
        img = img[0]  # Remove batch dim
    return img


@torch.no_grad()
def _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor:

    """
    Converts a batch of normalized flow to an RGB image.

    Args:
        normalized_flow (torch.Tensor): Normalized flow tensor of shape (N, 2, H, W)
    Returns:
       img (Tensor(N, 3, H, W)): Flow visualization image of dtype uint8.
    """

    N, _, H, W = normalized_flow.shape
    device = normalized_flow.device
    flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8, device=device)
    colorwheel = _make_colorwheel().to(device)  # shape [55x3]
    num_cols = colorwheel.shape[0]
    norm = torch.sum(normalized_flow ** 2, dim=1).sqrt()
    a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi
    # Map each flow angle to a fractional index into the color wheel and linearly
    # interpolate between the two neighboring wheel entries.
    fk = (a + 1) / 2 * (num_cols - 1)
    k0 = torch.floor(fk).to(torch.long)
    k1 = k0 + 1
    k1[k1 == num_cols] = 0
    f = fk - k0

    for c in range(colorwheel.shape[1]):
        tmp = colorwheel[:, c]
        col0 = tmp[k0] / 255.0
        col1 = tmp[k1] / 255.0
        col = (1 - f) * col0 + f * col1
        col = 1 - norm * (1 - col)  # fade towards white where the flow magnitude is small
        flow_image[:, c, :, :] = torch.floor(255 * col)
    return flow_image


def _make_colorwheel() -> torch.Tensor:
    """
    Generates a color wheel for optical flow visualization as presented in:
    Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
    URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf.

    Returns:
        colorwheel (Tensor[55, 3]): Colorwheel Tensor.
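
    The 55 rows come from the six hue transition segments defined below:
    RY + YG + GC + CB + BM + MR = 15 + 6 + 4 + 11 + 13 + 6 = 55.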
    """

    RY = 15
    YG = 6
    GC = 4
    CB = 11
    BM = 13
    MR = 6

    ncols = RY + YG + GC + CB + BM + MR
    colorwheel = torch.zeros((ncols, 3))
    col = 0

    # RY
    colorwheel[0:RY, 0] = 255
    colorwheel[0:RY, 1] = torch.floor(255 * torch.arange(0, RY) / RY)
    col = col + RY
    # YG
    colorwheel[col : col + YG, 0] = 255 - torch.floor(255 * torch.arange(0, YG) / YG)
    colorwheel[col : col + YG, 1] = 255
    col = col + YG
    # GC
    colorwheel[col : col + GC, 1] = 255
    colorwheel[col : col + GC, 2] = torch.floor(255 * torch.arange(0, GC) / GC)
    col = col + GC
    # CB
    colorwheel[col : col + CB, 1] = 255 - torch.floor(255 * torch.arange(CB) / CB)
    colorwheel[col : col + CB, 2] = 255
    col = col + CB
    # BM
    colorwheel[col : col + BM, 2] = 255
    colorwheel[col : col + BM, 0] = torch.floor(255 * torch.arange(0, BM) / BM)
    col = col + BM
    # MR
    colorwheel[col : col + MR, 2] = 255 - torch.floor(255 * torch.arange(MR) / MR)
    colorwheel[col : col + MR, 0] = 255
    return colorwheel


def _generate_color_palette(num_objects: int):
    palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
    return [tuple((i * palette) % 255) for i in range(num_objects)]


def _log_api_usage_once(obj: Any) -> None:

    """
    Logs API usage (module and name) within an organization.
    In a large ecosystem, it's often useful to track PyTorch and
    TorchVision API usage. This API provides similar functionality to the
    logging module in the Python stdlib. It can be used for debugging purposes
    to log which methods are used; by default it is inactive, unless the user
    manually subscribes a logger via the `SetAPIUsageLogger method <https://github.com/pytorch/pytorch/blob/eb3b9fe719b21fae13c7a7cf3253f970290a573e/c10/util/Logging.cpp#L114>`_.
    Please note it is triggered only once for the same API call within a process.
    It does not collect any data from open-source users since it is a no-op by default.
    For more information, please refer to
    * PyTorch note: https://pytorch.org/docs/stable/notes/large_scale_deployments.html#api-usage-logging;
    * Logging policy: https://github.com/pytorch/vision/issues/5052;

    Args:
        obj (class instance or method): an object to extract info from.
    """
    if not obj.__module__.startswith("torchvision"):
        return
    name = obj.__class__.__name__
    if isinstance(obj, FunctionType):
        name = obj.__name__
    torch._C._log_api_usage_once(f"{obj.__module__}.{name}")