"git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "c697f524761abd2314c030221a3ad2f7791eab4e"
Unverified commit 82f9a187 authored by Nicolas Hug, committed by GitHub

Add warning in docs and Cpp code for nvjpeg leak on CUDA < 11.6 (#5482)

parent 7251769f
@@ -47,6 +47,27 @@ torch::Tensor decode_jpeg_cuda(
TORCH_CHECK(device.is_cuda(), "Expected a cuda device")
int major_version;
int minor_version;
nvjpegStatus_t get_major_property_status =
nvjpegGetProperty(MAJOR_VERSION, &major_version);
nvjpegStatus_t get_minor_property_status =
nvjpegGetProperty(MINOR_VERSION, &minor_version);
TORCH_CHECK(
get_major_property_status == NVJPEG_STATUS_SUCCESS,
"nvjpegGetProperty failed: ",
get_major_property_status);
TORCH_CHECK(
get_minor_property_status == NVJPEG_STATUS_SUCCESS,
"nvjpegGetProperty failed: ",
get_minor_property_status);
if ((major_version < 11) || ((major_version == 11) && (minor_version < 6))) {
TORCH_WARN_ONCE(
"There is a memory leak issue in the nvjpeg library for CUDA versions < 11.6. "
"Make sure to rely on CUDA 11.6 or above before using decode_jpeg(..., device='cuda').");
}
at::cuda::CUDAGuard device_guard(device);
// Create global nvJPEG handle
...
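The C++ check above can also be mirrored from user code before opting into GPU decoding. Below is a minimal sketch, not part of the commit, assuming `torch.version.cuda` reports the CUDA toolkit version the build was compiled against; the helper name and file path are illustrative only.

```python
import torch
from torchvision.io import decode_jpeg, read_file


def safe_decode_jpeg(path: str, device: str = "cuda"):
    """Decode a JPEG on the GPU only when the CUDA toolkit is new enough.

    Mirrors the nvjpeg version check added in the C++ code: nvjpeg shipped
    with CUDA < 11.6 leaks memory, so fall back to CPU decoding in that case.
    """
    data = read_file(path)  # raw, undecoded JPEG bytes as a uint8 tensor
    cuda_version = torch.version.cuda  # e.g. "11.3"; None for CPU-only builds
    if device.startswith("cuda") and cuda_version is not None:
        major, minor = (int(v) for v in cuda_version.split(".")[:2])
        if (major, minor) < (11, 6):
            device = "cpu"  # avoid the nvjpeg leak on older CUDA
    return decode_jpeg(data, device=device)
```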
@@ -145,6 +145,10 @@ def decode_jpeg(
with `nvjpeg <https://developer.nvidia.com/nvjpeg>`_. This is only
supported for CUDA version >= 10.1
.. warning::
There is a memory leak in the nvjpeg library for CUDA versions < 11.6.
Make sure to rely on CUDA 11.6 or above before using ``device="cuda"``.
Returns:
output (Tensor[image_channels, image_height, image_width])
"""
...
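For reference, a short usage sketch of the documented `device="cuda"` path; the input file name is hypothetical, and on builds against CUDA < 11.6 the call would now emit the new warning once per process.

```python
from torchvision.io import decode_jpeg, read_file

data = read_file("example.jpg")          # hypothetical input file
img = decode_jpeg(data, device="cuda")   # warns once if built against CUDA < 11.6
print(img.shape, img.device)             # e.g. a [3, H, W] uint8 tensor on cuda:0
```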