import torch


def get_modules(use_v2):
    # We need a guarded import to avoid triggering the V2 warning when only V1 is used
    if use_v2:
        import torchvision.transforms.v2
        import torchvision.tv_tensors

        import v2_extras

        return torchvision.transforms.v2, torchvision.tv_tensors, v2_extras
    else:
        import transforms

        return transforms, None, None
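
# A minimal sketch of how `get_modules` is meant to be consumed (the alias `T`
# mirrors how the presets below use the returned module):
#
#     T, tv_tensors, v2_extras = get_modules(use_v2=True)
#     crop = T.RandomCrop(480)  # i.e. torchvision.transforms.v2.RandomCrop
#
# With use_v2=False, the local v1 `transforms` module is returned and the
# tv_tensors / v2_extras slots are None.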


class SegmentationPresetTrain:
    def __init__(
        self,
        *,
        base_size,
        crop_size,
        hflip_prob=0.5,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
        backend="pil",
        use_v2=False,
    ):
        T, tv_tensors, v2_extras = get_modules(use_v2)

        transforms = []
        backend = backend.lower()
        if backend == "tv_tensor":
            transforms.append(T.ToImage())
        elif backend == "tensor":
            transforms.append(T.PILToTensor())
        elif backend != "pil":
            raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")

        transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]

        if hflip_prob > 0:
            transforms += [T.RandomHorizontalFlip(hflip_prob)]

        if use_v2:
            # We need a custom pad transform here, since the padding we want to perform
            # is fundamentally different from the padding in `RandomCrop` with `pad_if_needed=True`.
            transforms += [v2_extras.PadIfSmaller(crop_size, fill={tv_tensors.Mask: 255, "others": 0})]
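            # On the fill values (an assumption based on the reference training script):
            # padding masks with 255 marks those pixels as "ignore" for a loss configured
            # with ignore_index=255, while images are padded with plain zeros.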

        transforms += [T.RandomCrop(crop_size)]

        if backend == "pil":
            transforms += [T.PILToTensor()]

        if use_v2:
            img_type = tv_tensors.Image if backend == "tv_tensor" else torch.Tensor
            transforms += [
                T.ToDtype(dtype={img_type: torch.float32, tv_tensors.Mask: torch.int64, "others": None}, scale=True)
            ]
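            # `scale=True` only rescales the image entry to [0, 1]; the mask is merely
            # cast to int64, and any other input is passed through unchanged.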
        else:
            # No need to explicitly convert masks here: the v1 `PILToTensor` already returns them as int64
            transforms += [T.ToDtype(torch.float, scale=True)]

        transforms += [T.Normalize(mean=mean, std=std)]
        if use_v2:
            transforms += [T.ToPureTensor()]

        self.transforms = T.Compose(transforms)

    def __call__(self, img, target):
        return self.transforms(img, target)
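
# A minimal usage sketch (`pil_image` / `pil_mask` are hypothetical inputs; any
# PIL image/mask pair works with the default "pil" backend, and the sizes shown
# are just illustrative):
#
#     preset = SegmentationPresetTrain(base_size=520, crop_size=480, use_v2=True)
#     img, target = preset(pil_image, pil_mask)
#     # img: normalized float32 tensor; target: int64 mask tensor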


class SegmentationPresetEval:
    def __init__(
        self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
    ):
        T, _, _ = get_modules(use_v2)

        transforms = []
        backend = backend.lower()
        if backend == "tensor":
            transforms += [T.PILToTensor()]
        elif backend == "tv_tensor":
            transforms += [T.ToImage()]
        elif backend != "pil":
            raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")

        if use_v2:
            transforms += [T.Resize(size=(base_size, base_size))]
        else:
            transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]
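            # With min_size == max_size, RandomResize degenerates into a deterministic
            # resize of the shorter side (the v2 branch above instead resizes to a
            # fixed square).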

        if backend == "pil":
            # Note: we could just convert to pure tensors even in v2?
            transforms += [T.ToImage() if use_v2 else T.PILToTensor()]

        transforms += [
            T.ToDtype(torch.float, scale=True),
            T.Normalize(mean=mean, std=std),
        ]
        if use_v2:
            transforms += [T.ToPureTensor()]

        self.transforms = T.Compose(transforms)

    def __call__(self, img, target):
        return self.transforms(img, target)
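
# Likewise for evaluation (a sketch; `pil_image` / `pil_mask` are hypothetical
# inputs as above):
#
#     eval_preset = SegmentationPresetEval(base_size=520, use_v2=True)
#     img, target = eval_preset(pil_image, pil_mask)  # deterministic resize + normalize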