Unverified Commit fb66ef81 authored by Pavel Iakubovskii's avatar Pavel Iakubovskii Committed by GitHub
Browse files

Update kwargs validation for `preprocess` with decorator (#32024)

* BLIP preprocess

* BIT preprocess

* BRIDGETOWER preprocess

* CHAMELEON preprocess

* CHINESE_CLIP preprocess

* CONVNEXT preprocess

* DEIT preprocess

* DONUT preprocess

* DPT preprocess

* FLAVA preprocess

* EFFICIENTNET preprocess

* FUYU preprocess

* GLPN preprocess

* IMAGEGPT preprocess

* INSTRUCTBLIPVIDEO preprocess

* VIVIT preprocess

* ZOEDEPTH preprocess

* VITMATTE preprocess

* VIT preprocess

* VILT preprocess

* VIDEOMAE preprocess

* VIDEOLLAVA

* TVP processing

* TVP fixup

* SWIN2SR preprocess

* SIGLIP preprocess

* SAM preprocess

* RT-DETR preprocess

* PVT preprocess

* POOLFORMER preprocess

* PERCEIVER preprocess

* OWLVIT preprocess

* OWLV2 preprocess

* NOUGAT preprocess

* MOBILEVIT preprocess

* MOBILENETV2 preprocess

* MOBILENETV1 preprocess

* LEVIT preprocess

* LAYOUTLMV2 preprocess

* LAYOUTLMV3 preprocess

* Add test

* Update tests
parent e85d8639
......@@ -47,6 +47,7 @@ class Owlv2ImageProcessingTester(unittest.TestCase):
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
super().__init__()
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
......
......@@ -44,6 +44,7 @@ class OwlViTImageProcessingTester(unittest.TestCase):
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
super().__init__()
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
......
......@@ -41,6 +41,7 @@ class PoolFormerImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"shortest_edge": 30}
crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
self.parent = parent
......
......@@ -41,6 +41,7 @@ class PvtImageProcessingTester(unittest.TestCase):
image_mean=[0.485, 0.456, 0.406],
image_std=[0.229, 0.224, 0.225],
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -45,6 +45,7 @@ class RTDetrImageProcessingTester(unittest.TestCase):
do_pad=False,
return_tensors="pt",
):
super().__init__()
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
......
......@@ -43,6 +43,7 @@ class SiglipImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -48,6 +48,7 @@ class Swin2SRImageProcessingTester(unittest.TestCase):
do_pad=True,
pad_size=8,
):
super().__init__()
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
......
......@@ -58,6 +58,7 @@ class TvpImageProcessingTester(unittest.TestCase):
num_channels=3,
num_frames=2,
):
super().__init__()
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
......
......@@ -52,6 +52,7 @@ class VideoLlavaImageProcessingTester(unittest.TestCase):
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
......
......@@ -50,6 +50,7 @@ class VideoMAEImageProcessingTester(unittest.TestCase):
image_std=[0.5, 0.5, 0.5],
crop_size=None,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 18}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
......
......@@ -46,6 +46,7 @@ class ViltImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"shortest_edge": 30}
self.parent = parent
self.batch_size = batch_size
......
......@@ -44,6 +44,7 @@ class ViTImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -15,6 +15,7 @@
import unittest
import warnings
import numpy as np
......@@ -51,6 +52,7 @@ class VitMatteImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
......@@ -197,3 +199,20 @@ class VitMatteImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image = np.random.randn(3, 249, 512)
images = image_processing.pad_image(image)
assert images.shape == (3, 256, 512)
def test_image_processor_preprocess_arguments(self):
    # VitMatte's image processor needs an extra trimap alongside the image,
    # so the common mixin test is overridden here to pass one in.
    for processor_class in self.image_processor_list:
        processor = processor_class(**self.image_processor_dict)
        sample_image = self.image_processor_tester.prepare_image_inputs()[0]
        sample_trimap = np.random.randint(0, 3, size=sample_image.size[::-1])

        # Capture all warnings so we can check that the unexpected kwarg is reported.
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            processor(sample_image, trimaps=sample_trimap, extra_argument=True)

        # At least one warning must mention the unrecognized argument name.
        self.assertGreaterEqual(len(caught), 1)
        combined = " ".join(str(warning.message) for warning in caught)
        self.assertIn("extra_argument", combined)
......@@ -50,6 +50,7 @@ class VivitImageProcessingTester(unittest.TestCase):
image_std=[0.5, 0.5, 0.5],
crop_size=None,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 18}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
......
......@@ -46,6 +46,7 @@ class ZoeDepthImageProcessingTester(unittest.TestCase):
image_std=[0.5, 0.5, 0.5],
do_pad=False,
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -19,6 +19,7 @@ import os
import pathlib
import tempfile
import time
import warnings
import numpy as np
import requests
......@@ -425,8 +426,12 @@ class ImageProcessingTestMixin:
)
def test_image_processor_preprocess_arguments(self):
is_tested = False
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
# validation done by _valid_processor_keys attribute
if hasattr(image_processor, "_valid_processor_keys") and hasattr(image_processor, "preprocess"):
preprocess_parameter_names = inspect.getfullargspec(image_processor.preprocess).args
preprocess_parameter_names.remove("self")
......@@ -434,6 +439,28 @@ class ImageProcessingTestMixin:
valid_processor_keys = image_processor._valid_processor_keys
valid_processor_keys.sort()
self.assertEqual(preprocess_parameter_names, valid_processor_keys)
is_tested = True
# validation done by @filter_out_non_signature_kwargs decorator
if hasattr(image_processor.preprocess, "_filter_out_non_signature_kwargs"):
if hasattr(self.image_processor_tester, "prepare_image_inputs"):
inputs = self.image_processor_tester.prepare_image_inputs()
elif hasattr(self.image_processor_tester, "prepare_video_inputs"):
inputs = self.image_processor_tester.prepare_video_inputs()
else:
self.skipTest(reason="No valid input preparation method found")
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.simplefilter("always")
image_processor(inputs, extra_argument=True)
messages = " ".join([str(w.message) for w in raised_warnings])
self.assertGreaterEqual(len(raised_warnings), 1)
self.assertIn("extra_argument", messages)
is_tested = True
if not is_tested:
self.skipTest(reason="No validation found for `preprocess` method")
class AnnotationFormatTestMixin:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment