import collections
import math
import pathlib
import warnings
from itertools import repeat
from types import FunctionType
from typing import Any, BinaryIO, List, Optional, Tuple, Union

import numpy as np
import torch
from PIL import Image, ImageColor, ImageDraw, ImageFont

__all__ = [
    "make_grid",
    "save_image",
    "draw_bounding_boxes",
    "draw_segmentation_masks",
    "draw_keypoints",
    "flow_to_image",
]


@torch.no_grad()
def make_grid(
    tensor: Union[torch.Tensor, List[torch.Tensor]],
    nrow: int = 8,
    padding: int = 2,
    normalize: bool = False,
    value_range: Optional[Tuple[int, int]] = None,
    scale_each: bool = False,
    pad_value: float = 0.0,
    **kwargs,
) -> torch.Tensor:
    """
    Make a grid of images.

    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
        padding (int, optional): amount of padding. Default: ``2``.
        normalize (bool, optional): If ``True``, shift the image to the range (0, 1),
            by the min and max values specified by ``value_range``. Default: ``False``.
        value_range (tuple, optional): tuple (min, max) where min and max are numbers.
            These values are used to normalize the image. By default, min and max
            are computed from the tensor.
        range (tuple, optional):
            .. warning::
                This parameter was deprecated in ``0.12`` and will be removed in ``0.14``. Please use ``value_range``
                instead.
        scale_each (bool, optional): If ``True``, scale each image in the batch of
            images separately rather than the (min, max) over all images. Default: ``False``.
        pad_value (float, optional): Value for the padded pixels. Default: ``0``.

    Returns:
        grid (Tensor): the tensor containing grid of images.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(make_grid)
    if not torch.is_tensor(tensor):
        if isinstance(tensor, list):
            for t in tensor:
                if not torch.is_tensor(t):
                    raise TypeError(f"tensor or list of tensors expected, got a list containing {type(t)}")
        else:
            raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}")

    if "range" in kwargs.keys():
        warnings.warn(
            "The parameter 'range' is deprecated since 0.12 and will be removed in 0.14. "
            "Please use 'value_range' instead."
        )
        value_range = kwargs["range"]

    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = torch.stack(tensor, dim=0)

    if tensor.dim() == 2:  # single image H x W
        tensor = tensor.unsqueeze(0)
    if tensor.dim() == 3:  # single image
        if tensor.size(0) == 1:  # if single-channel, convert to 3-channel
            tensor = torch.cat((tensor, tensor, tensor), 0)
        tensor = tensor.unsqueeze(0)

    if tensor.dim() == 4 and tensor.size(1) == 1:  # single-channel images
        tensor = torch.cat((tensor, tensor, tensor), 1)

    if normalize is True:
        tensor = tensor.clone()  # avoid modifying tensor in-place
        if value_range is not None and not isinstance(value_range, tuple):
            raise TypeError("value_range has to be a tuple (min, max) if specified. min and max are numbers")

        def norm_ip(img, low, high):
            img.clamp_(min=low, max=high)
            img.sub_(low).div_(max(high - low, 1e-5))

        def norm_range(t, value_range):
            if value_range is not None:
                norm_ip(t, value_range[0], value_range[1])
            else:
                norm_ip(t, float(t.min()), float(t.max()))

        if scale_each is True:
            for t in tensor:  # loop over mini-batch dimension
                norm_range(t, value_range)
        else:
            norm_range(tensor, value_range)

    if not isinstance(tensor, torch.Tensor):
        raise TypeError("tensor should be of type torch.Tensor")
    if tensor.size(0) == 1:
        return tensor.squeeze(0)

    # make the mini-batch of images into a grid
    nmaps = tensor.size(0)
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
    num_channels = tensor.size(1)
    grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            # Tensor.copy_() is a valid method but seems to be missing from the stubs
            # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_
            grid.narrow(1, y * height + padding, height - padding).narrow(  # type: ignore[attr-defined]
                2, x * width + padding, width - padding
            ).copy_(tensor[k])
            k = k + 1
    return grid
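

# Usage sketch for make_grid (added for illustration; `_demo_make_grid` is a
# hypothetical helper, not part of the original torchvision API). It tiles a
# random batch of 16 RGB images into 4 rows of 4 and rescales them to [0, 1].
def _demo_make_grid() -> torch.Tensor:
    batch = torch.rand(16, 3, 64, 64)  # B x C x H x W
    # With nrow=4 and padding=2 the result has shape (3, 4*64 + 5*2, 4*64 + 5*2)
    return make_grid(batch, nrow=4, padding=2, normalize=True)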


@torch.no_grad()
def save_image(
    tensor: Union[torch.Tensor, List[torch.Tensor]],
    fp: Union[str, pathlib.Path, BinaryIO],
    format: Optional[str] = None,
    **kwargs,
) -> None:
    """
    Save a given Tensor into an image file.

    Args:
        tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
            saves the tensor as a grid of images by calling ``make_grid``.
        fp (string or file object): A filename or a file object
        format (Optional): If omitted, the format to use is determined from the filename extension.
            If a file object was used instead of a filename, this parameter should always be used.
        **kwargs: Other arguments are documented in ``make_grid``.
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(save_image)
    grid = make_grid(tensor, **kwargs)
    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
    ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    im = Image.fromarray(ndarr)
    im.save(fp, format=format)
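

# Usage sketch for save_image (illustrative only; `_demo_save_image` is a
# hypothetical helper). Writing into a BytesIO buffer shows why `format` must
# be given explicitly when `fp` is a file object rather than a filename.
def _demo_save_image() -> None:
    import io

    batch = torch.rand(8, 3, 32, 32)
    buffer = io.BytesIO()
    save_image(batch, buffer, format="PNG", nrow=4)  # nrow is forwarded to make_grid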


@torch.no_grad()
def draw_bounding_boxes(
    image: torch.Tensor,
    boxes: torch.Tensor,
    labels: Optional[List[str]] = None,
    colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,
    fill: Optional[bool] = False,
    width: int = 1,
    font: Optional[str] = None,
    font_size: Optional[int] = None,
) -> torch.Tensor:

    """
    Draws bounding boxes on given image.
    The values of the input image should be uint8 between 0 and 255.
180
    If fill is True, Resulting Tensor should be saved as PNG image.
181
182

    Args:
183
        image (Tensor): Tensor of shape (C x H x W) and dtype uint8.
184
        boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that
185
186
187
            the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and
            `0 <= ymin < ymax < H`.
        labels (List[str]): List containing the labels of bounding boxes.
188
189
190
        colors (color or list of colors, optional): List containing the colors
            of the boxes or single color for all boxes. The color can be represented as
            PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``.
191
            By default, random colors are generated for boxes.
192
        fill (bool): If `True` fills the bounding box with specified color.
193
194
195
196
197
        width (int): Width of bounding box.
        font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
            also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
            `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
        font_size (int): The requested font size in points.
198
199
200

    Returns:
        img (Tensor[C, H, W]): Image Tensor of dtype uint8 with bounding boxes plotted.
201
202
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(draw_bounding_boxes)
    if not isinstance(image, torch.Tensor):
        raise TypeError(f"Tensor expected, got {type(image)}")
    elif image.dtype != torch.uint8:
        raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
    elif image.dim() != 3:
        raise ValueError("Pass individual images, not batches")
    elif image.size(0) not in {1, 3}:
        raise ValueError("Only grayscale and RGB images are supported")
    elif (boxes[:, 0] > boxes[:, 2]).any() or (boxes[:, 1] > boxes[:, 3]).any():
        raise ValueError(
            "Boxes need to be in (xmin, ymin, xmax, ymax) format. Use torchvision.ops.box_convert to convert them"
        )

    num_boxes = boxes.shape[0]

    if num_boxes == 0:
        warnings.warn("boxes doesn't contain any box. No box was drawn")
        return image

    if labels is None:
        labels: Union[List[str], List[None]] = [None] * num_boxes  # type: ignore[no-redef]
    elif len(labels) != num_boxes:
        raise ValueError(
            f"Number of boxes ({num_boxes}) and labels ({len(labels)}) mismatch. Please specify labels for each box."
        )

    if colors is None:
        colors = _generate_color_palette(num_boxes)
    elif isinstance(colors, list):
        if len(colors) < num_boxes:
            raise ValueError(f"Number of colors ({len(colors)}) is less than number of boxes ({num_boxes}). ")
    else:  # colors specifies a single color for all boxes
        colors = [colors] * num_boxes

    colors = [(ImageColor.getrgb(color) if isinstance(color, str) else color) for color in colors]

    if font is None:
        if font_size is not None:
            warnings.warn("Argument 'font_size' will be ignored since 'font' is not set.")
        txt_font = ImageFont.load_default()
    else:
        txt_font = ImageFont.truetype(font=font, size=font_size or 10)

    # Handle Grayscale images
    if image.size(0) == 1:
        image = torch.tile(image, (3, 1, 1))

    ndarr = image.permute(1, 2, 0).cpu().numpy()
    img_to_draw = Image.fromarray(ndarr)
    img_boxes = boxes.to(torch.int64).tolist()

    if fill:
        draw = ImageDraw.Draw(img_to_draw, "RGBA")
    else:
        draw = ImageDraw.Draw(img_to_draw)

    for bbox, color, label in zip(img_boxes, colors, labels):  # type: ignore[arg-type]
        if fill:
            fill_color = color + (100,)
            draw.rectangle(bbox, width=width, outline=color, fill=fill_color)
        else:
            draw.rectangle(bbox, width=width, outline=color)

        if label is not None:
            margin = width + 1
            draw.text((bbox[0] + margin, bbox[1] + margin), label, fill=color, font=txt_font)

    return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)
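

# Usage sketch for draw_bounding_boxes (illustrative; `_demo_draw_bounding_boxes`
# is a hypothetical helper). Boxes are absolute (xmin, ymin, xmax, ymax) pixel
# coordinates, and one label/color is supplied per box.
def _demo_draw_bounding_boxes() -> torch.Tensor:
    image = torch.zeros((3, 100, 100), dtype=torch.uint8)
    boxes = torch.tensor([[10, 10, 50, 50], [60, 60, 90, 90]], dtype=torch.float)
    return draw_bounding_boxes(image, boxes, labels=["cat", "dog"], colors=["red", "#00FF00"], width=2)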


@torch.no_grad()
def draw_segmentation_masks(
    image: torch.Tensor,
    masks: torch.Tensor,
    alpha: float = 0.8,
    colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,
) -> torch.Tensor:

    """
    Draws segmentation masks on given RGB image.
    The values of the input image should be uint8 between 0 and 255.

    Args:
288
289
290
291
        image (Tensor): Tensor of shape (3, H, W) and dtype uint8.
        masks (Tensor): Tensor of shape (num_masks, H, W) or (H, W) and dtype bool.
        alpha (float): Float number between 0 and 1 denoting the transparency of the masks.
            0 means full transparency, 1 means no transparency.
292
293
294
295
        colors (color or list of colors, optional): List containing the colors
            of the masks or single color for all masks. The color can be represented as
            PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``.
            By default, random colors are generated for each mask.
296
297

    Returns:
298
        img (Tensor[C, H, W]): Image Tensor, with segmentation masks drawn on top.
299
300
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(draw_segmentation_masks)
    if not isinstance(image, torch.Tensor):
        raise TypeError(f"The image must be a tensor, got {type(image)}")
    elif image.dtype != torch.uint8:
        raise ValueError(f"The image dtype must be uint8, got {image.dtype}")
    elif image.dim() != 3:
        raise ValueError("Pass individual images, not batches")
    elif image.size()[0] != 3:
        raise ValueError("Pass an RGB image. Other Image formats are not supported")
    if masks.ndim == 2:
        masks = masks[None, :, :]
    if masks.ndim != 3:
        raise ValueError("masks must be of shape (H, W) or (batch_size, H, W)")
    if masks.dtype != torch.bool:
        raise ValueError(f"The masks must be of dtype bool. Got {masks.dtype}")
    if masks.shape[-2:] != image.shape[-2:]:
        raise ValueError("The image and the masks must have the same height and width")

    num_masks = masks.size()[0]
    if colors is not None and num_masks > len(colors):
        raise ValueError(f"There are more masks ({num_masks}) than colors ({len(colors)})")

    if num_masks == 0:
        warnings.warn("masks doesn't contain any mask. No mask was drawn")
        return image

    if colors is None:
        colors = _generate_color_palette(num_masks)

    if not isinstance(colors, list):
        colors = [colors]
    if not isinstance(colors[0], (tuple, str)):
        raise ValueError("colors must be a tuple or a string, or a list thereof")
    if isinstance(colors[0], tuple) and len(colors[0]) != 3:
        raise ValueError("It seems that you passed a tuple of colors instead of a list of colors")

    out_dtype = torch.uint8

    colors_ = []
    for color in colors:
        if isinstance(color, str):
            color = ImageColor.getrgb(color)
        colors_.append(torch.tensor(color, dtype=out_dtype))

    img_to_draw = image.detach().clone()
    # TODO: There might be a way to vectorize this
    for mask, color in zip(masks, colors_):
        img_to_draw[:, mask] = color[:, None]
    out = image * (1 - alpha) + img_to_draw * alpha
    return out.to(out_dtype)
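

# Usage sketch for draw_segmentation_masks (illustrative; `_demo_draw_segmentation_masks`
# is a hypothetical helper). Each boolean mask is alpha-blended onto the image
# with its own color.
def _demo_draw_segmentation_masks() -> torch.Tensor:
    image = torch.zeros((3, 100, 100), dtype=torch.uint8)
    masks = torch.zeros((2, 100, 100), dtype=torch.bool)
    masks[0, :50, :] = True  # top half
    masks[1, :, :50] = True  # left half
    return draw_segmentation_masks(image, masks, alpha=0.6, colors=["blue", "yellow"])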


@torch.no_grad()
def draw_keypoints(
    image: torch.Tensor,
    keypoints: torch.Tensor,
    connectivity: Optional[List[Tuple[int, int]]] = None,
    colors: Optional[Union[str, Tuple[int, int, int]]] = None,
    radius: int = 2,
    width: int = 3,
) -> torch.Tensor:

    """
    Draws Keypoints on given RGB image.
    The values of the input image should be uint8 between 0 and 255.

    Args:
        image (Tensor): Tensor of shape (3, H, W) and dtype uint8.
        keypoints (Tensor): Tensor of shape (num_instances, K, 2) the K keypoints location for each of the N instances,
            in the format [x, y].
373
        connectivity (List[Tuple[int, int]]]): A List of tuple where,
374
375
376
377
378
379
380
381
382
383
            each tuple contains pair of keypoints to be connected.
        colors (str, Tuple): The color can be represented as
            PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``.
        radius (int): Integer denoting radius of keypoint.
        width (int): Integer denoting width of line connecting keypoints.

    Returns:
        img (Tensor[C, H, W]): Image Tensor of dtype uint8 with keypoints drawn.
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(draw_keypoints)
    if not isinstance(image, torch.Tensor):
        raise TypeError(f"The image must be a tensor, got {type(image)}")
    elif image.dtype != torch.uint8:
        raise ValueError(f"The image dtype must be uint8, got {image.dtype}")
    elif image.dim() != 3:
        raise ValueError("Pass individual images, not batches")
    elif image.size()[0] != 3:
        raise ValueError("Pass an RGB image. Other Image formats are not supported")

    if keypoints.ndim != 3:
        raise ValueError("keypoints must be of shape (num_instances, K, 2)")

    ndarr = image.permute(1, 2, 0).cpu().numpy()
    img_to_draw = Image.fromarray(ndarr)
    draw = ImageDraw.Draw(img_to_draw)
    img_kpts = keypoints.to(torch.int64).tolist()

    for inst_id, kpt_inst in enumerate(img_kpts):
        for kpt_id, kpt in enumerate(kpt_inst):
            x1 = kpt[0] - radius
            x2 = kpt[0] + radius
            y1 = kpt[1] - radius
            y2 = kpt[1] + radius
            draw.ellipse([x1, y1, x2, y2], fill=colors, outline=None, width=0)

        if connectivity:
            for connection in connectivity:
                start_pt_x = kpt_inst[connection[0]][0]
                start_pt_y = kpt_inst[connection[0]][1]

                end_pt_x = kpt_inst[connection[1]][0]
                end_pt_y = kpt_inst[connection[1]][1]

                draw.line(
                    ((start_pt_x, start_pt_y), (end_pt_x, end_pt_y)),
                    width=width,
                )

    return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)
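

# Usage sketch for draw_keypoints (illustrative; `_demo_draw_keypoints` is a
# hypothetical helper). One instance with three keypoints is drawn, with the
# connectivity pairs indexing into the K dimension.
def _demo_draw_keypoints() -> torch.Tensor:
    image = torch.zeros((3, 100, 100), dtype=torch.uint8)
    keypoints = torch.tensor([[[20.0, 20.0], [50.0, 60.0], [80.0, 20.0]]])  # (1, K=3, 2)
    return draw_keypoints(image, keypoints, connectivity=[(0, 1), (1, 2)], colors="red", radius=3, width=2)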


# Flow visualization code adapted from https://github.com/tomrunia/OpticalFlow_Visualization
@torch.no_grad()
def flow_to_image(flow: torch.Tensor) -> torch.Tensor:

    """
    Converts a flow to an RGB image.

    Args:
        flow (Tensor): Flow of shape (N, 2, H, W) or (2, H, W) and dtype torch.float.

    Returns:
        img (Tensor): Image Tensor of dtype uint8 where each color corresponds
            to a given flow direction. Shape is (N, 3, H, W) or (3, H, W) depending on the input.
    """

    if flow.dtype != torch.float:
        raise ValueError(f"Flow should be of dtype torch.float, got {flow.dtype}.")

    orig_shape = flow.shape
    if flow.ndim == 3:
        flow = flow[None]  # Add batch dim
    if flow.ndim != 4 or flow.shape[1] != 2:
        raise ValueError(f"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.")

    max_norm = torch.sum(flow**2, dim=1).sqrt().max()
    epsilon = torch.finfo((flow).dtype).eps
    normalized_flow = flow / (max_norm + epsilon)
    img = _normalized_flow_to_image(normalized_flow)

    if len(orig_shape) == 3:
        img = img[0]  # Remove batch dim
    return img
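

# Usage sketch for flow_to_image (illustrative; `_demo_flow_to_image` is a
# hypothetical helper). A single (2, H, W) float flow field maps to a (3, H, W)
# uint8 image; a batched (N, 2, H, W) input would yield (N, 3, H, W).
def _demo_flow_to_image() -> torch.Tensor:
    flow = torch.randn(2, 32, 32)  # dtype defaults to torch.float
    return flow_to_image(flow)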


@torch.no_grad()
def _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor:

    """
    Converts a batch of normalized flow to an RGB image.

    Args:
        normalized_flow (torch.Tensor): Normalized flow tensor of shape (N, 2, H, W)
    Returns:
        img (Tensor(N, 3, H, W)): Flow visualization image of dtype uint8.
    """

    N, _, H, W = normalized_flow.shape
    device = normalized_flow.device
    flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8, device=device)
    colorwheel = _make_colorwheel().to(device)  # shape [55x3]
    num_cols = colorwheel.shape[0]
    norm = torch.sum(normalized_flow**2, dim=1).sqrt()
    a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi
    fk = (a + 1) / 2 * (num_cols - 1)
    k0 = torch.floor(fk).to(torch.long)
    k1 = k0 + 1
    k1[k1 == num_cols] = 0
    f = fk - k0

    for c in range(colorwheel.shape[1]):
        tmp = colorwheel[:, c]
        col0 = tmp[k0] / 255.0
        col1 = tmp[k1] / 255.0
        col = (1 - f) * col0 + f * col1
        col = 1 - norm * (1 - col)
        flow_image[:, c, :, :] = torch.floor(255 * col)
    return flow_image


def _make_colorwheel() -> torch.Tensor:
    """
    Generates a color wheel for optical flow visualization as presented in:
    Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
    URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf.

    Returns:
        colorwheel (Tensor[55, 3]): Colorwheel Tensor.
    """

    RY = 15
    YG = 6
    GC = 4
    CB = 11
    BM = 13
    MR = 6

    ncols = RY + YG + GC + CB + BM + MR
    colorwheel = torch.zeros((ncols, 3))
    col = 0

    # RY
    colorwheel[0:RY, 0] = 255
    colorwheel[0:RY, 1] = torch.floor(255 * torch.arange(0, RY) / RY)
    col = col + RY
    # YG
    colorwheel[col : col + YG, 0] = 255 - torch.floor(255 * torch.arange(0, YG) / YG)
    colorwheel[col : col + YG, 1] = 255
    col = col + YG
    # GC
    colorwheel[col : col + GC, 1] = 255
    colorwheel[col : col + GC, 2] = torch.floor(255 * torch.arange(0, GC) / GC)
    col = col + GC
    # CB
    colorwheel[col : col + CB, 1] = 255 - torch.floor(255 * torch.arange(CB) / CB)
    colorwheel[col : col + CB, 2] = 255
    col = col + CB
    # BM
    colorwheel[col : col + BM, 2] = 255
    colorwheel[col : col + BM, 0] = torch.floor(255 * torch.arange(0, BM) / BM)
    col = col + BM
    # MR
    colorwheel[col : col + MR, 2] = 255 - torch.floor(255 * torch.arange(MR) / MR)
    colorwheel[col : col + MR, 0] = 255
    return colorwheel
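

# Sanity-check sketch (illustrative; `_demo_colorwheel` is a hypothetical helper).
# The six segment lengths sum to 15 + 6 + 4 + 11 + 13 + 6 = 55 rows, matching the
# "[55x3]" comment in _normalized_flow_to_image.
def _demo_colorwheel() -> None:
    wheel = _make_colorwheel()
    assert wheel.shape == (55, 3)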


def _generate_color_palette(num_objects: int):
    palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1])
    return [tuple((i * palette) % 255) for i in range(num_objects)]


def _log_api_usage_once(obj: Any) -> None:

    """
    Logs API usage(module and name) within an organization.
    In a large ecosystem, it's often useful to track the PyTorch and
    TorchVision APIs usage. This API provides the similar functionality to the
    logging module in the Python stdlib. It can be used for debugging purpose
    to log which methods are used and by default it is inactive, unless the user
    manually subscribes a logger via the `SetAPIUsageLogger method <https://github.com/pytorch/pytorch/blob/eb3b9fe719b21fae13c7a7cf3253f970290a573e/c10/util/Logging.cpp#L114>`_.
    Please note it is triggered only once for the same API call within a process.
    It does not collect any data from open-source users since it is no-op by default.
    For more information, please refer to
    * PyTorch note: https://pytorch.org/docs/stable/notes/large_scale_deployments.html#api-usage-logging;
    * Logging policy: https://github.com/pytorch/vision/issues/5052;

    Args:
        obj (class instance or method): an object to extract info from.
    """
    module = obj.__module__
    if not module.startswith("torchvision"):
        module = f"torchvision.internal.{module}"
    name = obj.__class__.__name__
    if isinstance(obj, FunctionType):
        name = obj.__name__
    torch._C._log_api_usage_once(f"{module}.{name}")


def _make_ntuple(x: Any, n: int) -> Tuple[Any, ...]:
    """
    Make n-tuple from input x. If x is an iterable, then we just convert it to tuple.
    Otherwise we will make a tuple of length n, all with value of x.
    reference: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/utils.py#L8

    Args:
        x (Any): input value
        n (int): length of the resulting tuple
    """
    if isinstance(x, collections.abc.Iterable):
        return tuple(x)
    return tuple(repeat(x, n))
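

# Illustrative examples for _make_ntuple (hypothetical helper, not part of the
# original file): scalars are repeated, iterables pass through as tuples.
def _demo_make_ntuple() -> None:
    assert _make_ntuple(3, 2) == (3, 3)
    assert _make_ntuple([1, 2], 2) == (1, 2)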