Unverified commit 26fe8fad, authored by vfdev, committed by GitHub

Remove custom ops interpolation with antialiasing (#5329)



* Removed custom ops for interp with AA

* Fixed ufmt issues

Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent 0db67d85
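With this change, antialiased resizing goes through PyTorch core instead of torchvision's custom ops. A minimal sketch of the before/after call path (tensor and target sizes are illustrative):

```python
import torch
import torch.nn.functional as nnf

x = torch.rand(1, 3, 256, 256)

# Before this commit, torchvision dispatched antialiased resizing to a custom op:
#   torch.ops.torchvision._interpolate_bilinear2d_aa(x, [64, 64], False)
# After this commit, core interpolate handles antialiasing directly:
y = nnf.interpolate(x, size=[64, 64], mode="bilinear", align_corners=False, antialias=True)
```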
@@ -14,13 +14,6 @@ file(GLOB VISION_SRCS
     ../../torchvision/csrc/ops/*.h
     ../../torchvision/csrc/ops/*.cpp)
-# Remove interpolate_aa sources as they are temporary code
-# see https://github.com/pytorch/vision/pull/3761
-# and IndexingUtils.h is unavailable on Android build
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp")
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/interpolate_aa.cpp")
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/interpolate_aa.h")
 add_library(${TARGET} SHARED
     ${VISION_SRCS}
 )
...
@@ -11,13 +11,6 @@ file(GLOB VISION_SRCS
     ../torchvision/csrc/ops/*.h
     ../torchvision/csrc/ops/*.cpp)
-# Remove interpolate_aa sources as they are temporary code
-# see https://github.com/pytorch/vision/pull/3761
-# and using TensorIterator unavailable with iOS
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp")
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../torchvision/csrc/ops/interpolate_aa.cpp")
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../torchvision/csrc/ops/interpolate_aa.h")
 add_library(${TARGET} STATIC
     ${VISION_SRCS}
 )
...
@@ -3,6 +3,7 @@ import itertools
 import math
 import os
 import re
+from functools import partial
 from typing import Sequence

 import numpy as np
@@ -655,11 +656,13 @@ def test_resize_antialias(device, dt, size, interpolation):
 def test_assert_resize_antialias(interpolation):
     # Checks implementation on very large scales
-    # and catch TORCH_CHECK inside interpolate_aa_kernels.cu
+    # and catch TORCH_CHECK inside PyTorch implementation
     torch.manual_seed(12)
-    tensor, pil_img = _create_data(1000, 1000, device="cuda")
-    with pytest.raises(RuntimeError, match=r"Max supported scale factor is"):
+    tensor, _ = _create_data(1000, 1000, device="cuda")
+    # Error message is not yet updated in pytorch nightly
+    # with pytest.raises(RuntimeError, match=r"Provided interpolation parameters can not be handled"):
+    with pytest.raises(RuntimeError, match=r"Too much shared memory required"):
         F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True)
@@ -674,32 +677,12 @@ def test_interpolate_antialias_backward(device, dt, size, interpolation):
         return

     torch.manual_seed(12)
-    if interpolation == BILINEAR:
-        forward_op = torch.ops.torchvision._interpolate_bilinear2d_aa
-        backward_op = torch.ops.torchvision._interpolate_bilinear2d_aa_backward
-    elif interpolation == BICUBIC:
-        forward_op = torch.ops.torchvision._interpolate_bicubic2d_aa
-        backward_op = torch.ops.torchvision._interpolate_bicubic2d_aa_backward
-
-    class F(torch.autograd.Function):
-        @staticmethod
-        def forward(ctx, i):
-            result = forward_op(i, size, False)
-            ctx.save_for_backward(i, result)
-            return result
-
-        @staticmethod
-        def backward(ctx, grad_output):
-            i, result = ctx.saved_tensors
-            ishape = i.shape
-            oshape = result.shape[2:]
-            return backward_op(grad_output, oshape, ishape, False)
-
     x = (torch.rand(1, 32, 29, 3, dtype=torch.double, device=device).permute(0, 3, 1, 2).requires_grad_(True),)
-    assert torch.autograd.gradcheck(F.apply, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
+    resize = partial(F.resize, size=size, interpolation=interpolation, antialias=True)
+    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)

     x = (torch.rand(1, 3, 32, 29, dtype=torch.double, device=device, requires_grad=True),)
-    assert torch.autograd.gradcheck(F.apply, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
+    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)

 def check_functional_vs_PIL_vs_scripted(
...
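Dropping the custom ops also simplifies the backward test above: instead of wiring the forward/backward ops into a hand-written torch.autograd.Function, gradcheck now differentiates straight through the public F.resize. A standalone sketch of that pattern (the size and tolerances mirror the test):

```python
from functools import partial

import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F

# Bind everything except the input tensor, as the test does with partial().
resize = partial(F.resize, size=[12, 12], interpolation=InterpolationMode.BILINEAR, antialias=True)

x = torch.rand(1, 3, 32, 29, dtype=torch.double, requires_grad=True)
# gradcheck exercises the antialiased backward now implemented in PyTorch core.
assert torch.autograd.gradcheck(resize, (x,), eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
```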
[Two large diffs collapsed: likely the deleted cpu/interpolate_aa_kernels.cpp and cuda/interpolate_aa_kernels.cu sources.]
#include "interpolate_aa.h"
#include <ATen/core/dispatch/Dispatcher.h>
#include <torch/library.h>
#include <torch/types.h>
namespace vision {
namespace ops {
at::Tensor _interpolate_bilinear2d_aa(
const at::Tensor& input, // Input image
at::IntArrayRef output_size, // Output image size
bool align_corners) // The flag to align corners
{
static auto op =
c10::Dispatcher::singleton()
.findSchemaOrThrow("torchvision::_interpolate_bilinear2d_aa", "")
.typed<decltype(_interpolate_bilinear2d_aa)>();
return op.call(input, output_size, align_corners);
}
at::Tensor _interpolate_bicubic_aa(
const at::Tensor& input, // Input image
at::IntArrayRef output_size, // Output image size
bool align_corners) // The flag to align corners
{
static auto op =
c10::Dispatcher::singleton()
.findSchemaOrThrow("torchvision::_interpolate_bicubic2d_aa", "")
.typed<decltype(_interpolate_bicubic2d_aa)>();
return op.call(input, output_size, align_corners);
}
namespace detail {
at::Tensor _interpolate_bilinear2d_aa_backward(
const at::Tensor& grad_output,
at::IntArrayRef output_size,
at::IntArrayRef input_size,
bool align_corners) {
static auto op =
c10::Dispatcher::singleton()
.findSchemaOrThrow(
"torchvision::_interpolate_bilinear2d_aa_backward", "")
.typed<decltype(_interpolate_bilinear2d_aa_backward)>();
return op.call(grad_output, output_size, output_size, align_corners);
}
at::Tensor _interpolate_bicubic2d_aa_backward(
const at::Tensor& grad_output,
at::IntArrayRef output_size,
at::IntArrayRef input_size,
bool align_corners) {
static auto op =
c10::Dispatcher::singleton()
.findSchemaOrThrow(
"torchvision::_interpolate_bicubic2d_aa_backward", "")
.typed<decltype(_interpolate_bicubic2d_aa_backward)>();
return op.call(grad_output, output_size, output_size, align_corners);
}
} // namespace detail
TORCH_LIBRARY_FRAGMENT(torchvision, m) {
m.def(TORCH_SELECTIVE_SCHEMA(
"torchvision::_interpolate_bilinear2d_aa(Tensor input, int[] output_size, bool align_corners) -> Tensor"));
m.def(TORCH_SELECTIVE_SCHEMA(
"torchvision::_interpolate_bicubic2d_aa(Tensor input, int[] output_size, bool align_corners) -> Tensor"));
m.def(TORCH_SELECTIVE_SCHEMA(
"torchvision::_interpolate_bilinear2d_aa_backward(Tensor input, int[] output_size, int[] input_size, bool align_corners) -> Tensor"));
m.def(TORCH_SELECTIVE_SCHEMA(
"torchvision::_interpolate_bicubic2d_aa_backward(Tensor input, int[] output_size, int[] input_size, bool align_corners) -> Tensor"));
}
} // namespace ops
} // namespace vision
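The TORCH_LIBRARY_FRAGMENT block above is what exposed these schemas to Python under the torch.ops.torchvision namespace, which is the call path functional_tensor.py used before this commit. A sketch of how such a dispatcher-registered op was reached from Python (the call itself is commented out because the op no longer exists after this commit):

```python
import torch
import torchvision  # importing torchvision loads the C++ extension and registers its ops

x = torch.rand(1, 3, 256, 256)

# Pre-removal: the dispatcher looked up the registered schema and routed the
# call to the CPU or CUDA kernel depending on x.device.
# y = torch.ops.torchvision._interpolate_bilinear2d_aa(x, [64, 64], False)
```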
Deleted file: torchvision/csrc/ops/interpolate_aa.h

#pragma once

#include <ATen/ATen.h>
#include "../macros.h"

namespace vision {
namespace ops {

VISION_API at::Tensor _interpolate_bilinear2d_aa(
    const at::Tensor& input,
    at::IntArrayRef output_size,
    bool align_corners = false);

VISION_API at::Tensor _interpolate_bicubic2d_aa(
    const at::Tensor& input,
    at::IntArrayRef output_size,
    bool align_corners = false);

namespace detail {

VISION_API at::Tensor _interpolate_bilinear2d_aa_backward(
    const at::Tensor& grad,
    at::IntArrayRef output_size,
    at::IntArrayRef input_size,
    bool align_corners = false);

VISION_API at::Tensor _interpolate_bicubic2d_aa_backward(
    const at::Tensor& grad,
    at::IntArrayRef output_size,
    at::IntArrayRef input_size,
    bool align_corners = false);

} // namespace detail

} // namespace ops
} // namespace vision
@@ -481,13 +481,7 @@ def resize(
     # Define align_corners to avoid warnings
     align_corners = False if interpolation in ["bilinear", "bicubic"] else None
-    if antialias:
-        if interpolation == "bilinear":
-            img = torch.ops.torchvision._interpolate_bilinear2d_aa(img, [new_h, new_w], align_corners=False)
-        elif interpolation == "bicubic":
-            img = torch.ops.torchvision._interpolate_bicubic2d_aa(img, [new_h, new_w], align_corners=False)
-    else:
-        img = interpolate(img, size=[new_h, new_w], mode=interpolation, align_corners=align_corners)
+    img = interpolate(img, size=[new_h, new_w], mode=interpolation, align_corners=align_corners, antialias=antialias)
     if interpolation == "bicubic" and out_dtype == torch.uint8:
         img = img.clamp(min=0, max=255)
...
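The clamp kept in the new code path is still needed: bicubic kernels have negative lobes, so interpolated values can overshoot [0, 255] even when every input pixel is in range, which would wrap around on a uint8 cast. A small demonstration (input values are illustrative):

```python
import torch
import torch.nn.functional as nnf

# A hard edge: bicubic interpolation overshoots on both sides of it.
x = torch.tensor([[0.0, 0.0, 255.0, 255.0]] * 4).reshape(1, 1, 4, 4)
y = nnf.interpolate(x, size=(8, 8), mode="bicubic", align_corners=False)
print(y.min().item(), y.max().item())  # expect slightly below 0.0 and above 255.0
y = y.clamp(min=0, max=255)  # mirrors the uint8 handling in resize()
```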