Unverified Commit da7680f0 authored by Kai Zhang's avatar Kai Zhang Committed by GitHub
Browse files

Log io usage (#5038)

* add api usage log for io

* cover VideoReader

* cover c++ APIs

* add api usage log for io

* cover VideoReader

* cover c++ APIs

* add _cpp suffix to c++ APIs

* use new API and change cpp format

* remove _cpp suffix

* adopt new API

* lint
parent caff9d6d
...@@ -70,6 +70,8 @@ static void torch_jpeg_set_source_mgr( ...@@ -70,6 +70,8 @@ static void torch_jpeg_set_source_mgr(
} // namespace } // namespace
torch::Tensor decode_jpeg(const torch::Tensor& data, ImageReadMode mode) { torch::Tensor decode_jpeg(const torch::Tensor& data, ImageReadMode mode) {
C10_LOG_API_USAGE_ONCE(
"torchvision.csrc.io.image.cpu.decode_jpeg.decode_jpeg");
// Check that the input tensor dtype is uint8 // Check that the input tensor dtype is uint8
TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor");
// Check that the input tensor is 1-dimensional // Check that the input tensor is 1-dimensional
......
...@@ -23,6 +23,7 @@ torch::Tensor decode_png( ...@@ -23,6 +23,7 @@ torch::Tensor decode_png(
const torch::Tensor& data, const torch::Tensor& data,
ImageReadMode mode, ImageReadMode mode,
bool allow_16_bits) { bool allow_16_bits) {
C10_LOG_API_USAGE_ONCE("torchvision.csrc.io.image.cpu.decode_png.decode_png");
// Check that the input tensor dtype is uint8 // Check that the input tensor dtype is uint8
TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor");
// Check that the input tensor is 1-dimensional // Check that the input tensor is 1-dimensional
......
...@@ -25,6 +25,8 @@ using JpegSizeType = size_t; ...@@ -25,6 +25,8 @@ using JpegSizeType = size_t;
using namespace detail; using namespace detail;
torch::Tensor encode_jpeg(const torch::Tensor& data, int64_t quality) { torch::Tensor encode_jpeg(const torch::Tensor& data, int64_t quality) {
C10_LOG_API_USAGE_ONCE(
"torchvision.csrc.io.image.cpu.encode_jpeg.encode_jpeg");
// Define compression structures and error handling // Define compression structures and error handling
struct jpeg_compress_struct cinfo {}; struct jpeg_compress_struct cinfo {};
struct torch_jpeg_error_mgr jerr {}; struct torch_jpeg_error_mgr jerr {};
......
...@@ -63,6 +63,7 @@ void torch_png_write_data( ...@@ -63,6 +63,7 @@ void torch_png_write_data(
} // namespace } // namespace
torch::Tensor encode_png(const torch::Tensor& data, int64_t compression_level) { torch::Tensor encode_png(const torch::Tensor& data, int64_t compression_level) {
C10_LOG_API_USAGE_ONCE("torchvision.csrc.io.image.cpu.encode_png.encode_png");
// Define compression structures and error handling // Define compression structures and error handling
png_structp png_write; png_structp png_write;
png_infop info_ptr; png_infop info_ptr;
......
...@@ -33,6 +33,8 @@ std::wstring utf8_decode(const std::string& str) { ...@@ -33,6 +33,8 @@ std::wstring utf8_decode(const std::string& str) {
#endif #endif
torch::Tensor read_file(const std::string& filename) { torch::Tensor read_file(const std::string& filename) {
C10_LOG_API_USAGE_ONCE(
"torchvision.csrc.io.image.cpu.read_write_file.read_file");
#ifdef _WIN32 #ifdef _WIN32
// According to // According to
// https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/stat-functions?view=vs-2019, // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/stat-functions?view=vs-2019,
...@@ -76,6 +78,8 @@ torch::Tensor read_file(const std::string& filename) { ...@@ -76,6 +78,8 @@ torch::Tensor read_file(const std::string& filename) {
} }
void write_file(const std::string& filename, torch::Tensor& data) { void write_file(const std::string& filename, torch::Tensor& data) {
C10_LOG_API_USAGE_ONCE(
"torchvision.csrc.io.image.cpu.read_write_file.write_file");
// Check that the input tensor is on CPU // Check that the input tensor is on CPU
TORCH_CHECK(data.device() == torch::kCPU, "Input tensor should be on CPU"); TORCH_CHECK(data.device() == torch::kCPU, "Input tensor should be on CPU");
......
...@@ -33,6 +33,8 @@ torch::Tensor decode_jpeg_cuda( ...@@ -33,6 +33,8 @@ torch::Tensor decode_jpeg_cuda(
const torch::Tensor& data, const torch::Tensor& data,
ImageReadMode mode, ImageReadMode mode,
torch::Device device) { torch::Device device) {
C10_LOG_API_USAGE_ONCE(
"torchvision.csrc.io.image.cuda.decode_jpeg_cuda.decode_jpeg_cuda");
TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor");
TORCH_CHECK( TORCH_CHECK(
......
...@@ -157,6 +157,7 @@ void Video::_getDecoderParams( ...@@ -157,6 +157,7 @@ void Video::_getDecoderParams(
} // _get decoder params } // _get decoder params
Video::Video(std::string videoPath, std::string stream, int64_t numThreads) { Video::Video(std::string videoPath, std::string stream, int64_t numThreads) {
C10_LOG_API_USAGE_ONCE("torchvision.csrc.io.video.video.Video");
// set number of threads global // set number of threads global
numThreads_ = numThreads; numThreads_ = numThreads;
// parse stream information // parse stream information
......
...@@ -583,6 +583,8 @@ torch::List<torch::Tensor> read_video_from_memory( ...@@ -583,6 +583,8 @@ torch::List<torch::Tensor> read_video_from_memory(
int64_t audioEndPts, int64_t audioEndPts,
int64_t audioTimeBaseNum, int64_t audioTimeBaseNum,
int64_t audioTimeBaseDen) { int64_t audioTimeBaseDen) {
C10_LOG_API_USAGE_ONCE(
"torchvision.csrc.io.video_reader.video_reader.read_video_from_memory");
return readVideo( return readVideo(
false, false,
input_video, input_video,
...@@ -627,6 +629,8 @@ torch::List<torch::Tensor> read_video_from_file( ...@@ -627,6 +629,8 @@ torch::List<torch::Tensor> read_video_from_file(
int64_t audioEndPts, int64_t audioEndPts,
int64_t audioTimeBaseNum, int64_t audioTimeBaseNum,
int64_t audioTimeBaseDen) { int64_t audioTimeBaseDen) {
C10_LOG_API_USAGE_ONCE(
"torchvision.csrc.io.video_reader.video_reader.read_video_from_file");
torch::Tensor dummy_input_video = torch::ones({0}); torch::Tensor dummy_input_video = torch::ones({0});
return readVideo( return readVideo(
true, true,
...@@ -653,10 +657,14 @@ torch::List<torch::Tensor> read_video_from_file( ...@@ -653,10 +657,14 @@ torch::List<torch::Tensor> read_video_from_file(
} }
torch::List<torch::Tensor> probe_video_from_memory(torch::Tensor input_video) { torch::List<torch::Tensor> probe_video_from_memory(torch::Tensor input_video) {
C10_LOG_API_USAGE_ONCE(
"torchvision.csrc.io.video_reader.video_reader.probe_video_from_memory");
return probeVideo(false, input_video, ""); return probeVideo(false, input_video, "");
} }
torch::List<torch::Tensor> probe_video_from_file(std::string videoPath) { torch::List<torch::Tensor> probe_video_from_file(std::string videoPath) {
C10_LOG_API_USAGE_ONCE(
"torchvision.csrc.io.video_reader.video_reader.probe_video_from_file");
torch::Tensor dummy_input_video = torch::ones({0}); torch::Tensor dummy_input_video = torch::ones({0});
return probeVideo(true, dummy_input_video, videoPath); return probeVideo(true, dummy_input_video, videoPath);
} }
......
...@@ -2,6 +2,7 @@ from typing import Any, Dict, Iterator ...@@ -2,6 +2,7 @@ from typing import Any, Dict, Iterator
import torch import torch
from ..utils import _log_api_usage_once
from ._video_opt import ( from ._video_opt import (
Timebase, Timebase,
VideoMetaData, VideoMetaData,
...@@ -106,6 +107,7 @@ class VideoReader: ...@@ -106,6 +107,7 @@ class VideoReader:
""" """
def __init__(self, path: str, stream: str = "video", num_threads: int = 0) -> None: def __init__(self, path: str, stream: str = "video", num_threads: int = 0) -> None:
_log_api_usage_once(self)
if not _has_video_opt(): if not _has_video_opt():
raise RuntimeError( raise RuntimeError(
"Not compiled with video_reader support, " "Not compiled with video_reader support, "
......
...@@ -4,6 +4,7 @@ from warnings import warn ...@@ -4,6 +4,7 @@ from warnings import warn
import torch import torch
from ..extension import _load_library from ..extension import _load_library
from ..utils import _log_api_usage_once
try: try:
...@@ -41,6 +42,8 @@ def read_file(path: str) -> torch.Tensor: ...@@ -41,6 +42,8 @@ def read_file(path: str) -> torch.Tensor:
Returns: Returns:
data (Tensor) data (Tensor)
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(read_file)
data = torch.ops.image.read_file(path) data = torch.ops.image.read_file(path)
return data return data
...@@ -54,6 +57,8 @@ def write_file(filename: str, data: torch.Tensor) -> None: ...@@ -54,6 +57,8 @@ def write_file(filename: str, data: torch.Tensor) -> None:
filename (str): the path to the file to be written filename (str): the path to the file to be written
data (Tensor): the contents to be written to the output file data (Tensor): the contents to be written to the output file
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_file)
torch.ops.image.write_file(filename, data) torch.ops.image.write_file(filename, data)
...@@ -74,6 +79,8 @@ def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGE ...@@ -74,6 +79,8 @@ def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGE
Returns: Returns:
output (Tensor[image_channels, image_height, image_width]) output (Tensor[image_channels, image_height, image_width])
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_png)
output = torch.ops.image.decode_png(input, mode.value, False) output = torch.ops.image.decode_png(input, mode.value, False)
return output return output
...@@ -93,6 +100,8 @@ def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor: ...@@ -93,6 +100,8 @@ def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor:
Tensor[1]: A one dimensional int8 tensor that contains the raw bytes of the Tensor[1]: A one dimensional int8 tensor that contains the raw bytes of the
PNG file. PNG file.
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(encode_png)
output = torch.ops.image.encode_png(input, compression_level) output = torch.ops.image.encode_png(input, compression_level)
return output return output
...@@ -109,6 +118,8 @@ def write_png(input: torch.Tensor, filename: str, compression_level: int = 6): ...@@ -109,6 +118,8 @@ def write_png(input: torch.Tensor, filename: str, compression_level: int = 6):
compression_level (int): Compression factor for the resulting file, it must be a number compression_level (int): Compression factor for the resulting file, it must be a number
between 0 and 9. Default: 6 between 0 and 9. Default: 6
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_png)
output = encode_png(input, compression_level) output = encode_png(input, compression_level)
write_file(filename, output) write_file(filename, output)
...@@ -137,6 +148,8 @@ def decode_jpeg( ...@@ -137,6 +148,8 @@ def decode_jpeg(
Returns: Returns:
output (Tensor[image_channels, image_height, image_width]) output (Tensor[image_channels, image_height, image_width])
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_jpeg)
device = torch.device(device) device = torch.device(device)
if device.type == "cuda": if device.type == "cuda":
output = torch.ops.image.decode_jpeg_cuda(input, mode.value, device) output = torch.ops.image.decode_jpeg_cuda(input, mode.value, device)
...@@ -160,6 +173,8 @@ def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor: ...@@ -160,6 +173,8 @@ def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor:
output (Tensor[1]): A one dimensional int8 tensor that contains the raw bytes of the output (Tensor[1]): A one dimensional int8 tensor that contains the raw bytes of the
JPEG file. JPEG file.
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(encode_jpeg)
if quality < 1 or quality > 100: if quality < 1 or quality > 100:
raise ValueError("Image quality should be a positive number between 1 and 100") raise ValueError("Image quality should be a positive number between 1 and 100")
...@@ -178,6 +193,8 @@ def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75): ...@@ -178,6 +193,8 @@ def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75):
quality (int): Quality of the resulting JPEG file, it must be a number quality (int): Quality of the resulting JPEG file, it must be a number
between 1 and 100. Default: 75 between 1 and 100. Default: 75
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_jpeg)
output = encode_jpeg(input, quality) output = encode_jpeg(input, quality)
write_file(filename, output) write_file(filename, output)
...@@ -201,6 +218,8 @@ def decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHAN ...@@ -201,6 +218,8 @@ def decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHAN
Returns: Returns:
output (Tensor[image_channels, image_height, image_width]) output (Tensor[image_channels, image_height, image_width])
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_image)
output = torch.ops.image.decode_image(input, mode.value) output = torch.ops.image.decode_image(input, mode.value)
return output return output
...@@ -221,6 +240,8 @@ def read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torc ...@@ -221,6 +240,8 @@ def read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torc
Returns: Returns:
output (Tensor[image_channels, image_height, image_width]) output (Tensor[image_channels, image_height, image_width])
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(read_image)
data = read_file(path) data = read_file(path)
return decode_image(data, mode) return decode_image(data, mode)
......
...@@ -9,6 +9,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union ...@@ -9,6 +9,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np import numpy as np
import torch import torch
from ..utils import _log_api_usage_once
from . import _video_opt from . import _video_opt
...@@ -77,6 +78,8 @@ def write_video( ...@@ -77,6 +78,8 @@ def write_video(
audio_codec (str): the name of the audio codec, i.e. "mp3", "aac", etc. audio_codec (str): the name of the audio codec, i.e. "mp3", "aac", etc.
audio_options (Dict): dictionary containing options to be passed into the PyAV audio stream audio_options (Dict): dictionary containing options to be passed into the PyAV audio stream
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_video)
_check_av_available() _check_av_available()
video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy() video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy()
...@@ -256,6 +259,8 @@ def read_video( ...@@ -256,6 +259,8 @@ def read_video(
aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points
info (Dict): metadata for the video and audio. Can contain the fields video_fps (float) and audio_fps (int) info (Dict): metadata for the video and audio. Can contain the fields video_fps (float) and audio_fps (int)
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(read_video)
from torchvision import get_video_backend from torchvision import get_video_backend
...@@ -374,6 +379,8 @@ def read_video_timestamps(filename: str, pts_unit: str = "pts") -> Tuple[List[in ...@@ -374,6 +379,8 @@ def read_video_timestamps(filename: str, pts_unit: str = "pts") -> Tuple[List[in
video_fps (float, optional): the frame rate for the video video_fps (float, optional): the frame rate for the video
""" """
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(read_video_timestamps)
from torchvision import get_video_backend from torchvision import get_video_backend
if get_video_backend() != "pyav": if get_video_backend() != "pyav":
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment