"docs/vscode:/vscode.git/clone" did not exist on "60323e08057d36b617f11d3c4958d342a44d0342"
presets.py 3.86 KB
Newer Older
1
2
from collections import defaultdict

import torch
import transforms as reference_transforms


def get_modules(use_v2):
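    """Return the transforms namespace and tv_tensors module to use.

    With use_v2=True this is (torchvision.transforms.v2, torchvision.tv_tensors);
    otherwise it is the local reference ``transforms`` module and None.
    """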
    # We need a protected import to avoid the V2 warning in case just V1 is used
    if use_v2:
        import torchvision.transforms.v2
        import torchvision.tv_tensors

        return torchvision.transforms.v2, torchvision.tv_tensors
    else:
        return reference_transforms, None


class DetectionPresetTrain:
    # Note: this transform assumes that the inputs to forward() are always PIL
    # images, regardless of the backend parameter.
    def __init__(
        self,
        *,
        data_augmentation,
        hflip_prob=0.5,
        mean=(123.0, 117.0, 104.0),
        backend="pil",
        use_v2=False,
    ):
        T, tv_tensors = get_modules(use_v2)

        transforms = []
        backend = backend.lower()
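        # backend selects the input type the rest of the pipeline sees:
        # "tv_tensor" wraps inputs with ToImage, "tensor" converts PIL images
        # with PILToTensor, and "pil" keeps PIL images (converted further below).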
        if backend == "tv_tensor":
            transforms.append(T.ToImage())
        elif backend == "tensor":
            transforms.append(T.PILToTensor())
        elif backend != "pil":
            raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")

        if data_augmentation == "hflip":
            transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
        elif data_augmentation == "lsj":
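            # Large scale jitter: random rescaling around a 1024x1024 target,
            # then a fixed-size crop/pad to 1024x1024 filled with the mean.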
            transforms += [
                T.ScaleJitter(target_size=(1024, 1024), antialias=True),
                # TODO: FixedSizeCrop below doesn't work on tensors!
                reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
                T.RandomHorizontalFlip(p=hflip_prob),
            ]
        elif data_augmentation == "multiscale":
            transforms += [
                T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
                T.RandomHorizontalFlip(p=hflip_prob),
            ]
        elif data_augmentation == "ssd":
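            # RandomZoomOut pads the canvas: v2 takes a per-type fill mapping
            # (masks padded with 0, everything else with the mean), while the
            # v1 reference transform expects a plain list of channel means.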
            fill = defaultdict(lambda: mean, {tv_tensors.Mask: 0}) if use_v2 else list(mean)
            transforms += [
                T.RandomPhotometricDistort(),
                T.RandomZoomOut(fill=fill),
                T.RandomIoUCrop(),
                T.RandomHorizontalFlip(p=hflip_prob),
            ]
        elif data_augmentation == "ssdlite":
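            # Same cropping/flip recipe as "ssd" but without photometric
            # distortion or zoom-out.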
            transforms += [
                T.RandomIoUCrop(),
                T.RandomHorizontalFlip(p=hflip_prob),
            ]
        else:
            raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')

        if backend == "pil":
            # Note: we could just convert to pure tensors even in v2.
            transforms += [T.ToImage() if use_v2 else T.PILToTensor()]

        transforms += [T.ToDtype(torch.float, scale=True)]

        if use_v2:
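            # v2-only post-processing: normalize boxes to XYXY, drop degenerate
            # boxes (and their matching labels), and unwrap tv_tensors into
            # plain tensors.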
            transforms += [
                T.ConvertBoundingBoxFormat(tv_tensors.BoundingBoxFormat.XYXY),
                T.SanitizeBoundingBoxes(),
                T.ToPureTensor(),
            ]

        self.transforms = T.Compose(transforms)

    def __call__(self, img, target):
        return self.transforms(img, target)
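

# Minimal usage sketch (pil_img, boxes and labels are hypothetical names, not
# defined in this file; with use_v2=True the boxes are expected to be
# torchvision.tv_tensors.BoundingBoxes):
#
#     train_tf = DetectionPresetTrain(data_augmentation="hflip", use_v2=True)
#     img, target = train_tf(pil_img, {"boxes": boxes, "labels": labels})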


class DetectionPresetEval:
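    # Eval preset: no augmentation, only input-type and dtype conversion
    # (plus unwrapping tv_tensors when use_v2 is set).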
    def __init__(self, backend="pil", use_v2=False):
        T, _ = get_modules(use_v2)
        transforms = []
        backend = backend.lower()
        if backend == "pil":
            # Note: we could just convert to pure tensors even in v2?
            transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
        elif backend == "tensor":
            transforms += [T.PILToTensor()]
        elif backend == "tv_tensor":
            transforms += [T.ToImage()]
        else:
            raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")

        transforms += [T.ToDtype(torch.float, scale=True)]

        if use_v2:
            transforms += [T.ToPureTensor()]

        self.transforms = T.Compose(transforms)

    def __call__(self, img, target):
        return self.transforms(img, target)
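

# Minimal usage sketch for evaluation (pil_img and target are hypothetical,
# not defined in this file):
#
#     eval_tf = DetectionPresetEval(use_v2=True)
#     img, target = eval_tf(pil_img, target)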