Unverified Commit b0f9f7bc authored by Nicolas Hug, committed by GitHub

Use compiler.is_compiling() instead of deprecated _dynamo.is_compiling() (#8455)

parent 9b4b7423
@@ -194,7 +194,7 @@ def resize(
 # according to our benchmarks on eager, non-AVX CPUs should still prefer u8->f32->interpolate->u8 path for bilinear
 def _do_native_uint8_resize_on_cpu(interpolation: InterpolationMode) -> bool:
     if interpolation == InterpolationMode.BILINEAR:
-        if torch._dynamo.is_compiling():
+        if torch.compiler.is_compiling():
             return True
         else:
             return "AVX2" in torch.backends.cpu.get_cpu_capability()
@@ -525,7 +525,7 @@ def _get_inverse_affine_matrix(
 def _compute_affine_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]:
-    if torch._dynamo.is_compiling() and not torch.jit.is_scripting():
+    if torch.compiler.is_compiling() and not torch.jit.is_scripting():
         return _compute_affine_output_size_python(matrix, w, h)
     else:
         return _compute_affine_output_size_tensor(matrix, w, h)
...
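For context, a minimal sketch of the pattern this commit relies on: torch.compiler.is_compiling() is the public replacement for the deprecated, private torch._dynamo.is_compiling(). The fallback helper below (the name _is_compiling is hypothetical, not part of this commit or of torchvision) shows one way a project could keep compatibility with older PyTorch releases that do not yet expose the public API:

import torch

def _is_compiling() -> bool:
    # Prefer the public API (torch.compiler.is_compiling()), which replaces
    # the deprecated private torch._dynamo.is_compiling().
    compiler = getattr(torch, "compiler", None)
    if compiler is not None and hasattr(compiler, "is_compiling"):
        return compiler.is_compiling()
    # Fall back to the private helper on older PyTorch versions.
    return torch._dynamo.is_compiling()

torchvision itself simply switches to the public call, as shown in the diff above, since it targets PyTorch versions where torch.compiler.is_compiling() is available.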