Unverified commit 8e0e7157 authored by Philip Meier, committed by GitHub

ignore jit warning 'globally' (#6833)

* ignore jit warning 'globally'

* improve comment
parent edb3a806
@@ -2,7 +2,6 @@ import decimal
 import functools
 import itertools
 import math
-import re

 import numpy as np
 import pytest
@@ -159,12 +158,6 @@ KERNEL_INFOS.extend(
         KernelInfo(
             F.horizontal_flip_bounding_box,
             sample_inputs_fn=sample_inputs_horizontal_flip_bounding_box,
-            test_marks=[
-                TestMark(
-                    ("TestKernels", "test_scripted_vs_eager"),
-                    pytest.mark.filterwarnings(f"ignore:{re.escape('operator() profile_node %72')}:UserWarning"),
-                )
-            ],
         ),
         KernelInfo(
             F.horizontal_flip_mask,
@@ -2045,17 +2038,11 @@ def sample_inputs_convert_dtype_video():
     yield ArgsKwargs(video_loader)


-_common_convert_dtype_marks = [
-    TestMark(
-        ("TestKernels", "test_dtype_and_device_consistency"),
-        pytest.mark.skip(reason="`convert_dtype_*` kernels convert the dtype by design"),
-        condition=lambda args_kwargs: args_kwargs.args[0].dtype != args_kwargs.kwargs.get("dtype", torch.float32),
-    ),
-    TestMark(
-        ("TestKernels", "test_scripted_vs_eager"),
-        pytest.mark.filterwarnings(f"ignore:{re.escape('operator() profile_node %')}:UserWarning"),
-    ),
-]
+skip_dtype_consistency = TestMark(
+    ("TestKernels", "test_dtype_and_device_consistency"),
+    pytest.mark.skip(reason="`convert_dtype_*` kernels convert the dtype by design"),
+    condition=lambda args_kwargs: args_kwargs.args[0].dtype != args_kwargs.kwargs.get("dtype", torch.float32),
+)

 KERNEL_INFOS.extend(
     [
@@ -2065,7 +2052,7 @@ KERNEL_INFOS.extend(
             reference_fn=reference_convert_dtype_image_tensor,
             reference_inputs_fn=reference_inputs_convert_dtype_image_tensor,
             test_marks=[
-                *_common_convert_dtype_marks,
+                skip_dtype_consistency,
                 TestMark(
                     ("TestKernels", "test_against_reference"),
                     pytest.mark.xfail(reason="Conversion overflows"),
@@ -2083,7 +2070,9 @@ KERNEL_INFOS.extend(
         KernelInfo(
             F.convert_dtype_video,
             sample_inputs_fn=sample_inputs_convert_dtype_video,
-            test_marks=_common_convert_dtype_marks,
+            test_marks=[
+                skip_dtype_consistency,
+            ],
         ),
     ]
 )
......
 import math
 import os
+import re

 import numpy as np
 import PIL.Image
@@ -26,6 +27,15 @@ def script(fn):
         raise AssertionError(f"Trying to `torch.jit.script` '{fn.__name__}' raised the error above.") from error


+# Scripting a function often triggers a warning like
+# `UserWarning: operator() profile_node %$INT1 : int[] = prim::profile_ivalue($INT2) does not have profile information`
+# with varying `INT1` and `INT2`. Since these are uninteresting for us and only clutter the test summary, we ignore
+# them.
+ignore_jit_warning_no_profile = pytest.mark.filterwarnings(
+    f"ignore:{re.escape('operator() profile_node %')}:UserWarning"
+)
+
+
 def make_info_args_kwargs_params(info, *, args_kwargs_fn, test_id=None):
     args_kwargs = list(args_kwargs_fn(info))
     idx_field_len = len(str(len(args_kwargs)))
@@ -87,6 +97,7 @@ class TestKernels:
         condition=lambda info: info.reference_fn is not None,
     )
+    @ignore_jit_warning_no_profile
    @sample_inputs
    @pytest.mark.parametrize("device", cpu_and_gpu())
    def test_scripted_vs_eager(self, info, args_kwargs, device):
@@ -218,6 +229,7 @@ class TestDispatchers:
         condition=lambda info: features.Image in info.kernels,
     )
+    @ignore_jit_warning_no_profile
    @image_sample_inputs
    @pytest.mark.parametrize("device", cpu_and_gpu())
    def test_scripted_smoke(self, info, args_kwargs, device):
@@ -230,6 +242,7 @@ class TestDispatchers:
     # TODO: We need this until the dispatchers below also have `DispatcherInfo`'s. If they do, `test_scripted_smoke`
     # replaces this test for them.
+    @ignore_jit_warning_no_profile
     @pytest.mark.parametrize(
         "dispatcher",
         [
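For reference, here is a minimal self-contained sketch of the pattern this commit introduces: a `pytest.mark.filterwarnings` marker defined once at module level and reused as a decorator on every affected test. The test name and the warning text below are illustrative stand-ins, not taken from torchvision.

import re
import warnings

import pytest

# `filterwarnings` specs have the form `action:message:category`, where `message`
# is a regex matched against the start of the warning text. `re.escape` turns the
# literal prefix into a safe regex, so the trailing `%` matches any `%<number>`
# suffix emitted by the JIT profiler.
ignore_jit_warning_no_profile = pytest.mark.filterwarnings(
    f"ignore:{re.escape('operator() profile_node %')}:UserWarning"
)


@ignore_jit_warning_no_profile
def test_suppresses_profile_warning():
    # This warning matches the prefix above, so pytest suppresses it instead of
    # surfacing it in the test summary.
    warnings.warn(
        "operator() profile_node %72 : int[] = prim::profile_ivalue(%24) "
        "does not have profile information",
        UserWarning,
    )

With the marker defined once, each scripted test only needs the decorator instead of a per-`KernelInfo` `TestMark`, which is exactly the cleanup the diff above performs.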