Unverified commit eac3dc7b authored by Kai Zhang, committed by GitHub

Simplified usage log API (#5095)



* log API v3

* make torchscript happy

* make torchscript happy

* add missing logs to constructor

* log ops C++ API as well

* fix type hint

* check function with isinstance
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent 0b02d420
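
For context: the old helper took an explicit category and name (e.g. `_log_api_usage_once("models", self.__class__.__name__)`), while the simplified one takes the object itself and derives the event key. The diff below does not show the helper's body; here is a minimal sketch of what it could look like, assuming the key is built from the object's module and class, and assuming the "check function with isinstance" commit refers to a `FunctionType` check:

    from types import FunctionType
    from typing import Any

    import torch


    def _log_api_usage_once(obj: Any) -> None:
        # Build the event key from the object itself, so call sites no
        # longer pass ("models", self.__class__.__name__) by hand.
        name = obj.__class__.__name__
        if isinstance(obj, FunctionType):
            # Free functions (e.g. the ops wrappers) log their own name.
            name = obj.__name__
        torch._C._log_api_usage_once(f"{obj.__module__}.{name}")
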
@@ -20,6 +20,7 @@ at::Tensor deform_conv2d(
     int64_t groups,
     int64_t offset_groups,
     bool use_mask) {
+  C10_LOG_API_USAGE_ONCE("torchvision.csrc.ops.deform_conv2d.deform_conv2d");
   static auto op = c10::Dispatcher::singleton()
                        .findSchemaOrThrow("torchvision::deform_conv2d", "")
                        .typed<decltype(deform_conv2d)>();
...
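
Each C++ entry point in this diff now emits a one-time usage event before dispatching. For reference, the same event keys can also be emitted from Python via `torch._C._log_api_usage_once`; a hypothetical smoke check, not part of this diff:

    import torch

    # PyTorch is expected to deduplicate per event key, so calling this
    # repeatedly should record the event at most once per process.
    torch._C._log_api_usage_once("torchvision.csrc.ops.deform_conv2d.deform_conv2d")
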
@@ -9,6 +9,7 @@ at::Tensor nms(
     const at::Tensor& dets,
     const at::Tensor& scores,
     double iou_threshold) {
+  C10_LOG_API_USAGE_ONCE("torchvision.csrc.ops.nms.nms");
   static auto op = c10::Dispatcher::singleton()
                        .findSchemaOrThrow("torchvision::nms", "")
                        .typed<decltype(nms)>();
...
@@ -12,6 +12,7 @@ std::tuple<at::Tensor, at::Tensor> ps_roi_align(
     int64_t pooled_height,
     int64_t pooled_width,
     int64_t sampling_ratio) {
+  C10_LOG_API_USAGE_ONCE("torchvision.csrc.ops.ps_roi_align.ps_roi_align");
   static auto op = c10::Dispatcher::singleton()
                        .findSchemaOrThrow("torchvision::ps_roi_align", "")
                        .typed<decltype(ps_roi_align)>();
...
@@ -11,6 +11,7 @@ std::tuple<at::Tensor, at::Tensor> ps_roi_pool(
     double spatial_scale,
     int64_t pooled_height,
     int64_t pooled_width) {
+  C10_LOG_API_USAGE_ONCE("torchvision.csrc.ops.ps_roi_pool.ps_roi_pool");
   static auto op = c10::Dispatcher::singleton()
                        .findSchemaOrThrow("torchvision::ps_roi_pool", "")
                        .typed<decltype(ps_roi_pool)>();
...
@@ -16,6 +16,7 @@ at::Tensor roi_align(
     bool aligned) // The flag for pixel shift
                   // along each axis.
 {
+  C10_LOG_API_USAGE_ONCE("torchvision.csrc.ops.roi_align.roi_align");
   static auto op = c10::Dispatcher::singleton()
                        .findSchemaOrThrow("torchvision::roi_align", "")
                        .typed<decltype(roi_align)>();
...
@@ -11,6 +11,7 @@ std::tuple<at::Tensor, at::Tensor> roi_pool(
     double spatial_scale,
     int64_t pooled_height,
     int64_t pooled_width) {
+  C10_LOG_API_USAGE_ONCE("torchvision.csrc.ops.roi_pool.roi_pool");
   static auto op = c10::Dispatcher::singleton()
                        .findSchemaOrThrow("torchvision::roi_pool", "")
                        .typed<decltype(roi_pool)>();
...
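
The "make torchscript happy" commits suggest the Python-side op wrappers guard the logging call, since TorchScript cannot compile the reflective helper. A hedged sketch of that pattern, using `nms` as a stand-in for any op and the helper sketched earlier:

    import torch
    from torch import Tensor


    def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
        # Assumed guard: only log in eager mode, never under scripting/tracing.
        if not torch.jit.is_scripting() and not torch.jit.is_tracing():
            _log_api_usage_once(nms)
        return torch.ops.torchvision.nms(boxes, scores, iou_threshold)
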
@@ -35,7 +35,7 @@ class VisionDataset(data.Dataset):
         transform: Optional[Callable] = None,
         target_transform: Optional[Callable] = None,
     ) -> None:
-        _log_api_usage_once("datasets", self.__class__.__name__)
+        _log_api_usage_once(self)
         if isinstance(root, torch._six.string_classes):
             root = os.path.expanduser(root)
         self.root = root
...
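
With the object-based form, every `VisionDataset` subclass is logged under its own module and class name with no extra arguments at the call site. A hypothetical subclass to illustrate the derived key:

    class MyDataset(VisionDataset):
        def __init__(self, root: str) -> None:
            # Under the helper sketch above this logs "<module>.MyDataset" once.
            super().__init__(root)
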
@@ -18,7 +18,7 @@ model_urls = {
 class AlexNet(nn.Module):
     def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         self.features = nn.Sequential(
             nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
             nn.ReLU(inplace=True),
...
@@ -163,7 +163,7 @@ class DenseNet(nn.Module):
     ) -> None:
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         # First convolution
         self.features = nn.Sequential(
...
@@ -27,7 +27,7 @@ class GeneralizedRCNN(nn.Module):
     def __init__(self, backbone: nn.Module, rpn: nn.Module, roi_heads: nn.Module, transform: nn.Module) -> None:
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         self.transform = transform
         self.backbone = backbone
         self.rpn = rpn
...
@@ -337,7 +337,7 @@ class RetinaNet(nn.Module):
         topk_candidates=1000,
     ):
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         if not hasattr(backbone, "out_channels"):
             raise ValueError(
...
@@ -182,7 +182,7 @@ class SSD(nn.Module):
         positive_fraction: float = 0.25,
     ):
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         self.backbone = backbone
...
@@ -120,7 +120,7 @@ class SSDLiteFeatureExtractorMobileNet(nn.Module):
         min_depth: int = 16,
     ):
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         assert not backbone[c4_pos].use_res_connect
         self.features = nn.Sequential(
...
@@ -170,7 +170,7 @@ class EfficientNet(nn.Module):
             norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
         """
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
...
@@ -39,7 +39,7 @@ class GoogLeNet(nn.Module):
         dropout_aux: float = 0.7,
     ) -> None:
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         if blocks is None:
             blocks = [BasicConv2d, Inception, InceptionAux]
         if init_weights is None:
...
@@ -37,7 +37,7 @@ class Inception3(nn.Module):
         dropout: float = 0.5,
     ) -> None:
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         if inception_blocks is None:
             inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
         if init_weights is None:
...
@@ -98,7 +98,7 @@ class MNASNet(torch.nn.Module):
     def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         assert alpha > 0.0
         self.alpha = alpha
         self.num_classes = num_classes
...
@@ -111,7 +111,7 @@ class MobileNetV2(nn.Module):
         """
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         if block is None:
             block = InvertedResidual
...
@@ -151,7 +151,7 @@ class MobileNetV3(nn.Module):
             dropout (float): The droupout probability
         """
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
...
@@ -440,7 +440,7 @@ class RAFT(nn.Module):
             If ``None`` (default), the flow is upsampled using interpolation.
         """
         super().__init__()
-        _log_api_usage_once("models", self.__class__.__name__)
+        _log_api_usage_once(self)
         self.feature_encoder = feature_encoder
         self.context_encoder = context_encoder
...
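
To see which events a constructor emits after this change, one can temporarily swap in a recording handler; a test-style sketch (not part of this PR, and it bypasses PyTorch's own per-key deduplication while patched):

    import torch
    import torchvision

    events = []
    _original = torch._C._log_api_usage_once
    torch._C._log_api_usage_once = events.append  # capture instead of logging
    try:
        torchvision.models.AlexNet()
    finally:
        torch._C._log_api_usage_once = _original
    print(events)  # e.g. ["torchvision.models.alexnet.AlexNet", ...]
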