from collections import defaultdict

import torch


def get_modules(use_v2):
    """Return the (transforms, datapoints, extras) modules for the requested API version.

    Imports are deliberately deferred into this function so that running the
    v1 code path never touches ``torchvision.transforms.v2`` (which would emit
    its prototype-API warning on import).
    """
    if not use_v2:
        import transforms

        return transforms, None, None

    import torchvision.datapoints
    import torchvision.transforms.v2
    import v2_extras

    return torchvision.transforms.v2, torchvision.datapoints, v2_extras


class SegmentationPresetTrain:
    """Training-time transform pipeline for semantic segmentation.

    Composes: random resize (0.5x-2x of ``base_size``) -> optional horizontal
    flip -> (v2 only) pad-if-smaller -> random crop -> tensor conversion ->
    dtype conversion/scaling -> normalization.

    Args:
        base_size: reference size used to derive the random-resize range.
        crop_size: output size of the random crop (and minimum padded size in v2).
        hflip_prob: probability of a horizontal flip; ``0`` disables the flip.
        mean, std: per-channel normalization statistics (ImageNet defaults).
        backend: one of ``"pil"``, ``"tensor"`` or ``"datapoint"`` — controls
            where in the pipeline the input is converted to a tensor.
        use_v2: select the torchvision transforms v2 API instead of the local v1
            ``transforms`` module.

    Raises:
        ValueError: if ``backend`` is not one of the three supported values.
    """

    def __init__(
        self,
        *,
        base_size,
        crop_size,
        hflip_prob=0.5,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
        backend="pil",
        use_v2=False,
    ):
        T, datapoints, v2_extras = get_modules(use_v2)

        transforms = []
        backend = backend.lower()
        if backend == "datapoint":
            transforms.append(T.ToImageTensor())
        elif backend == "tensor":
            transforms.append(T.PILToTensor())
        elif backend != "pil":
            raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")

        transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]

        if hflip_prob > 0:
            transforms += [T.RandomHorizontalFlip(hflip_prob)]

        if use_v2:
            # We need a custom pad transform here, since the padding we want to perform here is fundamentally
            # different from the padding in `RandomCrop` if `pad_if_needed=True`.
            # Masks are padded with 255 (the "ignore" label); everything else with 0.
            transforms += [v2_extras.PadIfSmaller(crop_size, fill=defaultdict(lambda: 0, {datapoints.Mask: 255}))]

        transforms += [T.RandomCrop(crop_size)]

        if backend == "pil":
            transforms += [T.PILToTensor()]

        if use_v2:
            img_type = datapoints.Image if backend == "datapoint" else torch.Tensor
            transforms += [
                T.ToDtype(dtype={img_type: torch.float32, datapoints.Mask: torch.int64, "others": None}, scale=True)
            ]
        else:
            # No need to explicitly convert masks as they're magically int64 already
            transforms += [T.ConvertImageDtype(torch.float)]

        transforms += [T.Normalize(mean=mean, std=std)]

        self.transforms = T.Compose(transforms)

    def __call__(self, img, target):
        """Apply the composed pipeline jointly to an image and its segmentation target."""
        return self.transforms(img, target)


class SegmentationPresetEval:
    """Evaluation-time transform pipeline for semantic segmentation.

    Composes: deterministic resize to ``base_size`` -> tensor conversion ->
    float conversion -> normalization. Unlike the training preset there is no
    random augmentation.

    Args:
        base_size: target size for the resize (v2 resizes to the square
            ``(base_size, base_size)``; v1 uses ``RandomResize`` with
            ``min_size == max_size``, which is equivalent to a fixed resize).
        mean, std: per-channel normalization statistics (ImageNet defaults).
        backend: one of ``"pil"``, ``"tensor"`` or ``"datapoint"`` — controls
            where in the pipeline the input is converted to a tensor.
        use_v2: select the torchvision transforms v2 API instead of the local v1
            ``transforms`` module.

    Raises:
        ValueError: if ``backend`` is not one of the three supported values.
    """

    def __init__(
        self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
    ):
        T, _, _ = get_modules(use_v2)

        transforms = []
        backend = backend.lower()
        if backend == "tensor":
            transforms += [T.PILToTensor()]
        elif backend == "datapoint":
            transforms += [T.ToImageTensor()]
        elif backend != "pil":
            raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")

        if use_v2:
            transforms += [T.Resize(size=(base_size, base_size))]
        else:
            # v1 has no plain Resize for (img, target) pairs; a degenerate
            # RandomResize range performs a deterministic resize.
            transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]

        if backend == "pil":
            # Note: we could just convert to pure tensors even in v2?
            transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]

        transforms += [
            T.ConvertImageDtype(torch.float),
            T.Normalize(mean=mean, std=std),
        ]
        self.transforms = T.Compose(transforms)

    def __call__(self, img, target):
        """Apply the composed pipeline jointly to an image and its segmentation target."""
        return self.transforms(img, target)