Unverified Commit fb66ef81 authored by Pavel Iakubovskii's avatar Pavel Iakubovskii Committed by GitHub
Browse files

Update kwargs validation for `preprocess` with decorator (#32024)

* BLIP preprocess

* BIT preprocess

* BRIDGETOWER preprocess

* CHAMELEON preprocess

* CHINESE_CLIP preprocess

* CONVNEXT preprocess

* DEIT preprocess

* DONUT preprocess

* DPT preprocess

* FLAVA preprocess

* EFFICIENTNET preprocess

* FUYU preprocess

* GLPN preprocess

* IMAGEGPT preprocess

* INSTRUCTBLIPVIDEO preprocess

* VIVIT preprocess

* ZOEDEPTH preprocess

* VITMATTE preprocess

* VIT preprocess

* VILT preprocess

* VIDEOMAE preprocess

* VIDEOLLAVA

* TVP processing

* TVP fixup

* SWIN2SR preprocess

* SIGLIP preprocess

* SAM preprocess

* RT-DETR preprocess

* PVT preprocess

* POOLFORMER preprocess

* PERCEIVER preprocess

* OWLVIT preprocess

* OWLV2 preprocess

* NOUGAT preprocess

* MOBILEVIT preprocess

* MOBILENETV2 preprocess

* MOBILENETV1 preprocess

* LEVIT preprocess

* LAYOUTLMV2 preprocess

* LAYOUTLMV3 preprocess

* Add test

* Update tests
parent e85d8639
......@@ -43,6 +43,7 @@ class BlipImageProcessingTester(unittest.TestCase):
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"height": 20, "width": 20}
self.parent = parent
self.batch_size = batch_size
......
......@@ -50,6 +50,7 @@ class BridgeTowerImageProcessingTester(unittest.TestCase):
max_resolution=400,
num_channels=3,
):
super().__init__()
self.parent = parent
self.do_resize = do_resize
self.size = size if size is not None else {"shortest_edge": 288}
......
......@@ -50,6 +50,7 @@ class ChameleonImageProcessingTester(unittest.TestCase):
image_std=[1.0, 1.0, 1.0],
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 18}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
......
......@@ -44,6 +44,7 @@ class ChineseCLIPImageProcessingTester(unittest.TestCase):
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"height": 224, "width": 224}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
......
......@@ -42,6 +42,7 @@ class ConvNextImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"shortest_edge": 20}
self.parent = parent
self.batch_size = batch_size
......
......@@ -43,6 +43,7 @@ class DeiTImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"height": 20, "width": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
......
......@@ -51,6 +51,7 @@ class DonutImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
......
......@@ -43,6 +43,7 @@ class DPTImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -43,6 +43,7 @@ class EfficientNetImageProcessorTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -76,6 +76,7 @@ class FlavaImageProcessingTester(unittest.TestCase):
codebook_image_mean=FLAVA_CODEBOOK_MEAN,
codebook_image_std=FLAVA_CODEBOOK_STD,
):
super().__init__()
size = size if size is not None else {"height": 224, "width": 224}
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
......
......@@ -46,6 +46,7 @@ class GLPNImageProcessingTester(unittest.TestCase):
size_divisor=32,
do_rescale=True,
):
super().__init__()
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
......
......@@ -51,6 +51,7 @@ class ImageGPTImageProcessingTester(unittest.TestCase):
size=None,
do_normalize=True,
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -50,6 +50,7 @@ class InstructBlipVideoProcessingTester(unittest.TestCase):
do_convert_rgb=True,
frames=4,
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -41,6 +41,7 @@ class LayoutLMv2ImageProcessingTester(unittest.TestCase):
size=None,
apply_ocr=True,
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -41,6 +41,7 @@ class LayoutLMv3ImageProcessingTester(unittest.TestCase):
size=None,
apply_ocr=True,
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
......
......@@ -43,6 +43,7 @@ class LevitImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"shortest_edge": 18}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
......
......@@ -40,6 +40,7 @@ class MobileNetV1ImageProcessingTester(unittest.TestCase):
do_center_crop=True,
crop_size=None,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
......
......@@ -40,6 +40,7 @@ class MobileNetV2ImageProcessingTester(unittest.TestCase):
do_center_crop=True,
crop_size=None,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
......
......@@ -48,6 +48,7 @@ class MobileViTImageProcessingTester(unittest.TestCase):
crop_size=None,
do_flip_channel_order=True,
):
super().__init__()
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
......
......@@ -53,6 +53,7 @@ class NougatImageProcessingTester(unittest.TestCase):
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
super().__init__()
size = size if size is not None else {"height": 20, "width": 20}
self.parent = parent
self.batch_size = batch_size
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment