# Transform presets for image classification (training / evaluation pipelines).
import torch
from torchvision.transforms.functional import InterpolationMode


def get_module(use_v2):
    """Return the transforms namespace to use: ``torchvision.transforms.v2``
    when *use_v2* is true, otherwise the classic ``torchvision.transforms``.

    The import is done lazily inside each branch so that merely using the V1
    API never imports the v2 module (which emits a warning at import time).
    """
    if not use_v2:
        import torchvision.transforms

        return torchvision.transforms

    import torchvision.transforms.v2

    return torchvision.transforms.v2


class ClassificationPresetTrain:
    """Standard training-time transform pipeline for image classification.

    Pipeline order:
    ``[PILToTensor if backend == "tensor"]`` -> ``RandomResizedCrop`` ->
    optional ``RandomHorizontalFlip`` -> optional auto-augmentation ->
    ``[PILToTensor if backend == "pil"]`` -> float conversion ->
    ``Normalize`` -> optional ``RandomErasing`` ->
    ``[ToPureTensor if use_v2]``.

    Note: this transform assumes that the input to forward() are always PIL
    images, regardless of the backend parameter. We may change that in the
    future though, if we change the output type from the dataset.
    """

    def __init__(
        self,
        *,
        crop_size,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
        interpolation=InterpolationMode.BILINEAR,
        hflip_prob=0.5,
        auto_augment_policy=None,
        ra_magnitude=9,
        augmix_severity=3,
        random_erase_prob=0.0,
        backend="pil",
        use_v2=False,
    ):
        """Build and store the composed transform.

        Args:
            crop_size: target size for ``RandomResizedCrop``.
            mean: per-channel mean used by ``Normalize``.
            std: per-channel std used by ``Normalize``.
            interpolation: interpolation mode for resizing/augmentations.
            hflip_prob: probability of horizontal flip; ``<= 0`` disables it.
            auto_augment_policy: ``None`` (disabled), ``"ra"``, ``"ta_wide"``,
                ``"augmix"``, or an ``AutoAugmentPolicy`` name.
            ra_magnitude: magnitude for ``RandAugment`` (``"ra"`` only).
            augmix_severity: severity for ``AugMix`` (``"augmix"`` only).
            random_erase_prob: probability for ``RandomErasing``; ``<= 0``
                disables it.
            backend: ``"pil"`` or ``"tensor"`` — where in the pipeline the
                PIL -> tensor conversion happens.
            use_v2: use ``torchvision.transforms.v2`` instead of v1.

        Raises:
            ValueError: if *backend* is neither ``"pil"`` nor ``"tensor"``.
        """
        T = get_module(use_v2)

        transforms = []
        backend = backend.lower()
        if backend == "tensor":
            # Convert up front so all subsequent transforms operate on tensors.
            transforms.append(T.PILToTensor())
        elif backend != "pil":
            raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")

        transforms.append(T.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
        if hflip_prob > 0:
            transforms.append(T.RandomHorizontalFlip(hflip_prob))
        if auto_augment_policy is not None:
            if auto_augment_policy == "ra":
                transforms.append(T.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
            elif auto_augment_policy == "ta_wide":
                transforms.append(T.TrivialAugmentWide(interpolation=interpolation))
            elif auto_augment_policy == "augmix":
                transforms.append(T.AugMix(interpolation=interpolation, severity=augmix_severity))
            else:
                # Any other value is treated as an AutoAugmentPolicy name
                # (e.g. "imagenet"); AutoAugmentPolicy raises on unknown names.
                aa_policy = T.AutoAugmentPolicy(auto_augment_policy)
                transforms.append(T.AutoAugment(policy=aa_policy, interpolation=interpolation))

        if backend == "pil":
            # The augmentations above ran on PIL images; convert to tensor now.
            transforms.append(T.PILToTensor())

        transforms.extend(
            [
                # v2 replaces ConvertImageDtype with ToDtype(scale=True).
                T.ToDtype(torch.float, scale=True) if use_v2 else T.ConvertImageDtype(torch.float),
                T.Normalize(mean=mean, std=std),
            ]
        )
        if random_erase_prob > 0:
            transforms.append(T.RandomErasing(p=random_erase_prob))

        if use_v2:
            # Strip any tv_tensor wrapper so downstream code sees plain tensors.
            transforms.append(T.ToPureTensor())

        self.transforms = T.Compose(transforms)

    def __call__(self, img):
        """Apply the composed transform pipeline to *img*."""
        return self.transforms(img)


class ClassificationPresetEval:
    """Standard evaluation-time transform pipeline for image classification.

    Pipeline order:
    ``[PILToTensor if backend == "tensor"]`` -> ``Resize`` -> ``CenterCrop``
    -> ``[PILToTensor if backend == "pil"]`` -> float conversion ->
    ``Normalize`` -> ``[ToPureTensor if use_v2]``.
    """

    def __init__(
        self,
        *,
        crop_size,
        resize_size=256,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
        interpolation=InterpolationMode.BILINEAR,
        backend="pil",
        use_v2=False,
    ):
        """Build and store the composed transform.

        Args:
            crop_size: target size for the final ``CenterCrop``.
            resize_size: shorter-edge size for the initial ``Resize``.
            mean: per-channel mean used by ``Normalize``.
            std: per-channel std used by ``Normalize``.
            interpolation: interpolation mode used by ``Resize``.
            backend: ``"pil"`` or ``"tensor"`` — where in the pipeline the
                PIL -> tensor conversion happens.
            use_v2: use ``torchvision.transforms.v2`` instead of v1.

        Raises:
            ValueError: if *backend* is neither ``"pil"`` nor ``"tensor"``.
        """
        T = get_module(use_v2)
        transforms = []
        backend = backend.lower()
        if backend == "tensor":
            # Convert up front so resize/crop operate on tensors.
            transforms.append(T.PILToTensor())
        elif backend != "pil":
            raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")

        transforms += [
            T.Resize(resize_size, interpolation=interpolation, antialias=True),
            T.CenterCrop(crop_size),
        ]

        if backend == "pil":
            # Resize/crop ran on PIL images; convert to tensor now.
            transforms.append(T.PILToTensor())

        transforms += [
            # v2 replaces ConvertImageDtype with ToDtype(scale=True).
            T.ToDtype(torch.float, scale=True) if use_v2 else T.ConvertImageDtype(torch.float),
            T.Normalize(mean=mean, std=std),
        ]

        if use_v2:
            # Strip any tv_tensor wrapper so downstream code sees plain tensors.
            transforms.append(T.ToPureTensor())

        self.transforms = T.Compose(transforms)

    def __call__(self, img):
        """Apply the composed transform pipeline to *img*."""
        return self.transforms(img)