"tests/models/siglip/test_tokenization_siglip.py" did not exist on "624495706c42932ca226419bfe980abdfe644e14"
test_image_processing_fuyu.py 2.32 KB
Newer Older
Pablo Montalvo's avatar
Pablo Montalvo committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import unittest

import numpy as np

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import (
    require_torch,
    require_torchvision,
    require_vision,
)


if is_torch_available() and is_vision_available():
    import torch

    from transformers import FuyuImageProcessor

if is_vision_available():
    from PIL import Image


@require_torch
@require_vision
@require_torchvision
class TestFuyuImageProcessor(unittest.TestCase):
    """Unit tests for ``FuyuImageProcessor``.

    Covers patch extraction (``get_num_patches`` / ``patchify_image``) and the
    resize-and-pad transformation (``apply_transformation``) for both numpy and
    PIL inputs.
    """

    def setUp(self):
        # The processor scales/pads every image onto a fixed 160x320 canvas.
        self.processor = FuyuImageProcessor(target_height=160, target_width=320, padding_value=1.0)
        self.batch_size = 3
        self.channels = 3
        self.height = 300
        self.width = 300

        # Random batched tensor input of shape (B, C, H, W) for the patch test.
        self.image_input = torch.rand(self.batch_size, self.channels, self.height, self.width)

        self.image_patch_dim_h = 30
        self.image_patch_dim_w = 30
        # Portrait sample (h=450, w=210) exercises aspect-ratio scaling; the
        # PIL twin exercises the PIL input path of apply_transformation.
        self.sample_image = np.zeros((450, 210, 3), dtype=np.uint8)
        self.sample_image_pil = Image.fromarray(self.sample_image)

    def test_patches(self):
        """``patchify_image`` must yield exactly ``get_num_patches`` patches."""
        expected_num_patches = self.processor.get_num_patches(
            img_h=self.height, img_w=self.width, patch_dim_h=self.image_patch_dim_h, patch_dim_w=self.image_patch_dim_w
        )

        patches_final = self.processor.patchify_image(
            image=self.image_input, patch_dim_h=self.image_patch_dim_h, patch_dim_w=self.image_patch_dim_w
        )
        # assertEqual (instead of a bare assert) survives `python -O`, gives a
        # richer failure message, and matches the style of the other tests.
        self.assertEqual(
            patches_final.shape[1],
            expected_num_patches,
            f"Expected {expected_num_patches} patches, got {patches_final.shape[1]}.",
        )

    def test_scale_to_target_aspect_ratio(self):
        # (h:450, w:210) fit into (160, 320): height clamps to 160 and width
        # scales proportionally -> floor(210 * 160 / 450) == 74.
        scaled_image = self.processor._scale_to_target_aspect_ratio(self.sample_image)
        self.assertEqual(scaled_image.shape[0], 160)
        self.assertEqual(scaled_image.shape[1], 74)

    def test_apply_transformation_numpy(self):
        """A numpy image is resized and padded to the full (160, 320) canvas."""
        transformed_image = self.processor.apply_transformation(self.sample_image)
        self.assertEqual(transformed_image.shape[0], 160)
        self.assertEqual(transformed_image.shape[1], 320)

    def test_apply_transformation_pil(self):
        """A PIL image follows the same pipeline and also ends at (160, 320)."""
        transformed_image = self.processor.apply_transformation(self.sample_image_pil)
        self.assertEqual(transformed_image.shape[0], 160)
        self.assertEqual(transformed_image.shape[1], 320)