Unverified Commit 48ebc0bb authored by Francisco Massa's avatar Francisco Massa Committed by GitHub
Browse files

Add logging to torchvision ops (#4799)

* Add logging to torchvision ops

* Hack to make torchscript work

* Bugfix

* Bugfix

* Lint

* mypy... let's silence it

* Fighting with mypy

* One more try
parent f7da6cef
...@@ -5,6 +5,7 @@ import torchvision ...@@ -5,6 +5,7 @@ import torchvision
from torch import Tensor from torch import Tensor
from torchvision.extension import _assert_has_ops from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._box_convert import _box_cxcywh_to_xyxy, _box_xyxy_to_cxcywh, _box_xywh_to_xyxy, _box_xyxy_to_xywh from ._box_convert import _box_cxcywh_to_xyxy, _box_xyxy_to_cxcywh, _box_xywh_to_xyxy, _box_xyxy_to_xywh
...@@ -33,6 +34,7 @@ def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor: ...@@ -33,6 +34,7 @@ def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
Tensor: int64 tensor with the indices of the elements that have been kept Tensor: int64 tensor with the indices of the elements that have been kept
by NMS, sorted in decreasing order of scores by NMS, sorted in decreasing order of scores
""" """
_log_api_usage_once("torchvision.ops.nms")
_assert_has_ops() _assert_has_ops()
return torch.ops.torchvision.nms(boxes, scores, iou_threshold) return torch.ops.torchvision.nms(boxes, scores, iou_threshold)
...@@ -61,6 +63,7 @@ def batched_nms( ...@@ -61,6 +63,7 @@ def batched_nms(
Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted
in decreasing order of scores in decreasing order of scores
""" """
_log_api_usage_once("torchvision.ops.batched_nms")
# Benchmarks that drove the following thresholds are at # Benchmarks that drove the following thresholds are at
# https://github.com/pytorch/vision/issues/1311#issuecomment-781329339 # https://github.com/pytorch/vision/issues/1311#issuecomment-781329339
# Ideally for GPU we'd use a higher threshold # Ideally for GPU we'd use a higher threshold
...@@ -120,6 +123,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor: ...@@ -120,6 +123,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
Tensor[K]: indices of the boxes that have both sides Tensor[K]: indices of the boxes that have both sides
larger than min_size larger than min_size
""" """
_log_api_usage_once("torchvision.ops.remove_small_boxes")
ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1] ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
keep = (ws >= min_size) & (hs >= min_size) keep = (ws >= min_size) & (hs >= min_size)
keep = torch.where(keep)[0] keep = torch.where(keep)[0]
...@@ -138,6 +142,7 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor: ...@@ -138,6 +142,7 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
Returns: Returns:
Tensor[N, 4]: clipped boxes Tensor[N, 4]: clipped boxes
""" """
_log_api_usage_once("torchvision.ops.clip_boxes_to_image")
dim = boxes.dim() dim = boxes.dim()
boxes_x = boxes[..., 0::2] boxes_x = boxes[..., 0::2]
boxes_y = boxes[..., 1::2] boxes_y = boxes[..., 1::2]
...@@ -178,6 +183,7 @@ def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor: ...@@ -178,6 +183,7 @@ def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor:
Tensor[N, 4]: Boxes into converted format. Tensor[N, 4]: Boxes into converted format.
""" """
_log_api_usage_once("torchvision.ops.box_convert")
allowed_fmts = ("xyxy", "xywh", "cxcywh") allowed_fmts = ("xyxy", "xywh", "cxcywh")
if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts: if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts:
raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt") raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt")
...@@ -227,6 +233,7 @@ def box_area(boxes: Tensor) -> Tensor: ...@@ -227,6 +233,7 @@ def box_area(boxes: Tensor) -> Tensor:
Returns: Returns:
Tensor[N]: the area for each box Tensor[N]: the area for each box
""" """
_log_api_usage_once("torchvision.ops.box_area")
boxes = _upcast(boxes) boxes = _upcast(boxes)
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
...@@ -262,6 +269,7 @@ def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: ...@@ -262,6 +269,7 @@ def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
Returns: Returns:
Tensor[N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 Tensor[N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
""" """
_log_api_usage_once("torchvision.ops.box_iou")
inter, union = _box_inter_union(boxes1, boxes2) inter, union = _box_inter_union(boxes1, boxes2)
iou = inter / union iou = inter / union
return iou return iou
...@@ -284,6 +292,7 @@ def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: ...@@ -284,6 +292,7 @@ def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
for every element in boxes1 and boxes2 for every element in boxes1 and boxes2
""" """
_log_api_usage_once("torchvision.ops.generalized_box_iou")
# degenerate boxes gives inf / nan results # degenerate boxes gives inf / nan results
# so do an early check # so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all() assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
...@@ -315,6 +324,7 @@ def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor: ...@@ -315,6 +324,7 @@ def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor:
Returns: Returns:
Tensor[N, 4]: bounding boxes Tensor[N, 4]: bounding boxes
""" """
_log_api_usage_once("torchvision.ops.masks_to_boxes")
if masks.numel() == 0: if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device, dtype=torch.float) return torch.zeros((0, 4), device=masks.device, dtype=torch.float)
......
...@@ -8,6 +8,8 @@ from torch.nn.modules.utils import _pair ...@@ -8,6 +8,8 @@ from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter from torch.nn.parameter import Parameter
from torchvision.extension import _assert_has_ops from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
def deform_conv2d( def deform_conv2d(
input: Tensor, input: Tensor,
...@@ -59,6 +61,7 @@ def deform_conv2d( ...@@ -59,6 +61,7 @@ def deform_conv2d(
>>> torch.Size([4, 5, 8, 8]) >>> torch.Size([4, 5, 8, 8])
""" """
_log_api_usage_once("torchvision.ops.deform_conv2d")
_assert_has_ops() _assert_has_ops()
out_channels = weight.shape[0] out_channels = weight.shape[0]
......
...@@ -4,6 +4,8 @@ from typing import Tuple, List, Dict, Optional ...@@ -4,6 +4,8 @@ from typing import Tuple, List, Dict, Optional
import torch.nn.functional as F import torch.nn.functional as F
from torch import nn, Tensor from torch import nn, Tensor
from ..utils import _log_api_usage_once
class ExtraFPNBlock(nn.Module): class ExtraFPNBlock(nn.Module):
""" """
...@@ -75,6 +77,7 @@ class FeaturePyramidNetwork(nn.Module): ...@@ -75,6 +77,7 @@ class FeaturePyramidNetwork(nn.Module):
extra_blocks: Optional[ExtraFPNBlock] = None, extra_blocks: Optional[ExtraFPNBlock] = None,
): ):
super().__init__() super().__init__()
_log_api_usage_once(self)
self.inner_blocks = nn.ModuleList() self.inner_blocks = nn.ModuleList()
self.layer_blocks = nn.ModuleList() self.layer_blocks = nn.ModuleList()
for in_channels in in_channels_list: for in_channels in in_channels_list:
......
import torch import torch
import torch.nn.functional as F import torch.nn.functional as F
from ..utils import _log_api_usage_once
def sigmoid_focal_loss( def sigmoid_focal_loss(
inputs: torch.Tensor, inputs: torch.Tensor,
...@@ -30,6 +32,7 @@ def sigmoid_focal_loss( ...@@ -30,6 +32,7 @@ def sigmoid_focal_loss(
Returns: Returns:
Loss tensor with the reduction option applied. Loss tensor with the reduction option applied.
""" """
_log_api_usage_once("torchvision.ops.sigmoid_focal_loss")
p = torch.sigmoid(inputs) p = torch.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = p * targets + (1 - p) * (1 - targets) p_t = p * targets + (1 - p) * (1 - targets)
......
...@@ -14,6 +14,8 @@ from typing import Callable, List, Optional ...@@ -14,6 +14,8 @@ from typing import Callable, List, Optional
import torch import torch
from torch import Tensor from torch import Tensor
from ..utils import _log_api_usage_once
class Conv2d(torch.nn.Conv2d): class Conv2d(torch.nn.Conv2d):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
...@@ -66,6 +68,7 @@ class FrozenBatchNorm2d(torch.nn.Module): ...@@ -66,6 +68,7 @@ class FrozenBatchNorm2d(torch.nn.Module):
warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning) warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning)
num_features = n num_features = n
super().__init__() super().__init__()
_log_api_usage_once(self)
self.eps = eps self.eps = eps
self.register_buffer("weight", torch.ones(num_features)) self.register_buffer("weight", torch.ones(num_features))
self.register_buffer("bias", torch.zeros(num_features)) self.register_buffer("bias", torch.zeros(num_features))
...@@ -138,6 +141,7 @@ class ConvNormActivation(torch.nn.Sequential): ...@@ -138,6 +141,7 @@ class ConvNormActivation(torch.nn.Sequential):
if activation_layer is not None: if activation_layer is not None:
layers.append(activation_layer(inplace=inplace)) layers.append(activation_layer(inplace=inplace))
super().__init__(*layers) super().__init__(*layers)
_log_api_usage_once(self)
self.out_channels = out_channels self.out_channels = out_channels
...@@ -150,6 +154,7 @@ class SqueezeExcitation(torch.nn.Module): ...@@ -150,6 +154,7 @@ class SqueezeExcitation(torch.nn.Module):
scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid, scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
) -> None: ) -> None:
super().__init__() super().__init__()
_log_api_usage_once(self)
self.avgpool = torch.nn.AdaptiveAvgPool2d(1) self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1) self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1) self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
......
...@@ -5,6 +5,7 @@ import torchvision ...@@ -5,6 +5,7 @@ import torchvision
from torch import nn, Tensor from torch import nn, Tensor
from torchvision.ops.boxes import box_area from torchvision.ops.boxes import box_area
from ..utils import _log_api_usage_once
from .roi_align import roi_align from .roi_align import roi_align
...@@ -130,6 +131,7 @@ class MultiScaleRoIAlign(nn.Module): ...@@ -130,6 +131,7 @@ class MultiScaleRoIAlign(nn.Module):
canonical_level: int = 4, canonical_level: int = 4,
): ):
super().__init__() super().__init__()
_log_api_usage_once(self)
if isinstance(output_size, int): if isinstance(output_size, int):
output_size = (output_size, output_size) output_size = (output_size, output_size)
self.featmap_names = featmap_names self.featmap_names = featmap_names
......
...@@ -3,6 +3,7 @@ from torch import nn, Tensor ...@@ -3,6 +3,7 @@ from torch import nn, Tensor
from torch.nn.modules.utils import _pair from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape
...@@ -42,6 +43,7 @@ def ps_roi_align( ...@@ -42,6 +43,7 @@ def ps_roi_align(
Returns: Returns:
Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs
""" """
_log_api_usage_once("torchvision.ops.ps_roi_align")
_assert_has_ops() _assert_has_ops()
check_roi_boxes_shape(boxes) check_roi_boxes_shape(boxes)
rois = boxes rois = boxes
......
...@@ -3,6 +3,7 @@ from torch import nn, Tensor ...@@ -3,6 +3,7 @@ from torch import nn, Tensor
from torch.nn.modules.utils import _pair from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape
...@@ -36,6 +37,7 @@ def ps_roi_pool( ...@@ -36,6 +37,7 @@ def ps_roi_pool(
Returns: Returns:
Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs. Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.
""" """
_log_api_usage_once("torchvision.ops.ps_roi_pool")
_assert_has_ops() _assert_has_ops()
check_roi_boxes_shape(boxes) check_roi_boxes_shape(boxes)
rois = boxes rois = boxes
......
...@@ -6,6 +6,7 @@ from torch.jit.annotations import BroadcastingList2 ...@@ -6,6 +6,7 @@ from torch.jit.annotations import BroadcastingList2
from torch.nn.modules.utils import _pair from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape
...@@ -49,6 +50,7 @@ def roi_align( ...@@ -49,6 +50,7 @@ def roi_align(
Returns: Returns:
Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs. Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
""" """
_log_api_usage_once("torchvision.ops.roi_align")
_assert_has_ops() _assert_has_ops()
check_roi_boxes_shape(boxes) check_roi_boxes_shape(boxes)
rois = boxes rois = boxes
......
...@@ -6,6 +6,7 @@ from torch.jit.annotations import BroadcastingList2 ...@@ -6,6 +6,7 @@ from torch.jit.annotations import BroadcastingList2
from torch.nn.modules.utils import _pair from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape
...@@ -38,6 +39,7 @@ def roi_pool( ...@@ -38,6 +39,7 @@ def roi_pool(
Returns: Returns:
Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs. Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
""" """
_log_api_usage_once("torchvision.ops.roi_pool")
_assert_has_ops() _assert_has_ops()
check_roi_boxes_shape(boxes) check_roi_boxes_shape(boxes)
rois = boxes rois = boxes
......
...@@ -2,6 +2,8 @@ import torch ...@@ -2,6 +2,8 @@ import torch
import torch.fx import torch.fx
from torch import nn, Tensor from torch import nn, Tensor
from ..utils import _log_api_usage_once
def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) -> Tensor: def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) -> Tensor:
""" """
...@@ -21,6 +23,7 @@ def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) ...@@ -21,6 +23,7 @@ def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True)
Returns: Returns:
Tensor[N, ...]: The randomly zeroed tensor. Tensor[N, ...]: The randomly zeroed tensor.
""" """
_log_api_usage_once("torchvision.ops.stochastic_depth")
if p < 0.0 or p > 1.0: if p < 0.0 or p > 1.0:
raise ValueError(f"drop probability has to be between 0 and 1, but got {p}") raise ValueError(f"drop probability has to be between 0 and 1, but got {p}")
if mode not in ["batch", "row"]: if mode not in ["batch", "row"]:
......
import math import math
import pathlib import pathlib
import warnings import warnings
from typing import Union, Optional, List, Tuple, BinaryIO from typing import Union, Optional, List, Tuple, BinaryIO, no_type_check
import numpy as np import numpy as np
import torch import torch
...@@ -305,5 +305,13 @@ def _generate_color_palette(num_masks: int): ...@@ -305,5 +305,13 @@ def _generate_color_palette(num_masks: int):
return [tuple((i * palette) % 255) for i in range(num_masks)] return [tuple((i * palette) % 255) for i in range(num_masks)]
def _log_api_usage_once(obj: object) -> None: @no_type_check
torch._C._log_api_usage_once(f"{obj.__module__}.{obj.__class__.__name__}") def _log_api_usage_once(obj: str) -> None: # type: ignore
if torch.jit.is_scripting() or torch.jit.is_tracing():
return
# NOTE: obj can be an object as well, but mocking it here to be
# only a string to appease torchscript
if isinstance(obj, str):
torch._C._log_api_usage_once(obj)
else:
torch._C._log_api_usage_once(f"{obj.__module__}.{obj.__class__.__name__}")
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment