# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os
import unittest
from numbers import Real
from pathlib import Path
from typing import Callable, Optional, Union

import numpy as np
import torch
from PIL import Image


def get_tests_dir() -> Path:
    """
    Returns Path for the directory containing this file.
    """
    return Path(__file__).resolve().parent


def get_pytorch3d_dir() -> Path:
    """
    Returns Path for the root PyTorch3D directory.

    Meta internal systems need a special case here.
    """
    if os.environ.get("INSIDE_RE_WORKER") is not None:
        return Path(__file__).resolve().parent
    elif os.environ.get("CONDA_BUILD_STATE", "") == "TEST":
        return Path(os.environ["SRC_DIR"])
    else:
        return Path(__file__).resolve().parent.parent
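
# A minimal usage sketch (not part of the original module): tests typically
# build data paths from these helpers. The name "DATA_DIR" here is hypothetical.
#
#     DATA_DIR = get_tests_dir() / "data"
#     docs_dir = get_pytorch3d_dir() / "docs"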


def load_rgb_image(filename: str, data_dir: Union[str, Path]):
    filepath = os.path.join(data_dir, filename)
    with Image.open(filepath) as raw_image:
        image = torch.from_numpy(np.array(raw_image) / 255.0)
    image = image.to(dtype=torch.float32)
    return image[..., :3]
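
# Usage sketch (assumes a hypothetical "frame.png" in a test data directory):
# the returned tensor is float32 in [0, 1] with any alpha channel dropped.
#
#     image = load_rgb_image("frame.png", get_tests_dir() / "data")
#     assert image.dtype == torch.float32 and image.shape[-1] == 3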


TensorOrArray = Union[torch.Tensor, np.ndarray]


def get_random_cuda_device() -> str:
    """
    Returns a random GPU device string from the available devices. This is
    useful for testing that custom CUDA kernels can support inputs on any
    device without having to set the device explicitly.
    """
    num_devices = torch.cuda.device_count()
    device_id = (
        torch.randint(high=num_devices, size=(1,)).item() if num_devices > 1 else 0
    )
    return "cuda:%d" % device_id


class TestCaseMixin(unittest.TestCase):
    def assertSeparate(self, tensor1, tensor2) -> None:
        """
        Verify that tensor1 and tensor2 have their data in distinct locations.
        """
        self.assertNotEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())

    def assertNotSeparate(self, tensor1, tensor2) -> None:
        """
        Verify that tensor1 and tensor2 have their data in the same locations.
        """
        self.assertEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())

    def assertAllSeparate(self, tensor_list) -> None:
        """
        Verify that all tensors in tensor_list have their data in
        distinct locations.
        """
        ptrs = [i.storage().data_ptr() for i in tensor_list]
        self.assertCountEqual(ptrs, set(ptrs))
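
    # Illustrative sketch (not from the original file), inside a test method of
    # a TestCaseMixin subclass: `.clone()` copies the underlying storage while
    # `.view()` aliases it, so:
    #
    #     a = torch.rand(4)
    #     self.assertSeparate(a, a.clone())                        # passes
    #     self.assertNotSeparate(a, a.view(2, 2))                  # passes
    #     self.assertAllSeparate([a, a.clone(), torch.rand(4)])    # passes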

    def assertNormsClose(
        self,
        input: TensorOrArray,
        other: TensorOrArray,
        norm_fn: Callable[[TensorOrArray], TensorOrArray],
        *,
        rtol: float = 1e-05,
        atol: float = 1e-08,
        equal_nan: bool = False,
        msg: Optional[str] = None,
    ) -> None:
        """
        Verifies that two tensors or arrays have the same shape and are close
            given absolute and relative tolerance; raises AssertionError otherwise.
            A custom norm function is computed before comparison. If no such
            preprocessing is needed, pass `torch.abs` or, equivalently, call
            `assertClose`.
        Args:
            input, other: two tensors or two arrays.
            norm_fn: The function evaluates
                `all(norm_fn(input - other) <= atol + rtol * norm_fn(other))`.
                norm_fn is a tensor -> tensor function; the output has:
                    * all entries non-negative,
                    * shape defined by the input shape only.
            rtol, atol, equal_nan: as for torch.allclose.
            msg: message in case the assertion is violated.
        Note:
            Optional arguments here are all keyword-only, to avoid confusion
            with msg arguments on other assert functions.
        """

        self.assertEqual(np.shape(input), np.shape(other))

        diff = norm_fn(input - other)
        other_ = norm_fn(other)

        # We want to generalize allclose(input, other), which is essentially
        #  all(diff <= atol + rtol * other)
        # but with sophisticated handling of non-finite values.
        # We work around that by calling allclose() with the following arguments:
        # allclose(diff + other_, other_). This computes what we want because
        #  all(|diff + other_ - other_| <= atol + rtol * |other_|) ==
        #    all(|norm_fn(input - other)| <= atol + rtol * |norm_fn(other)|) ==
        #    all(norm_fn(input - other) <= atol + rtol * norm_fn(other)).

        self.assertClose(
            diff + other_, other_, rtol=rtol, atol=atol, equal_nan=equal_nan, msg=msg
        )
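
    # Worked sketch (an assumption, not from the original file): comparing 3D
    # points up to a per-point Euclidean norm instead of elementwise abs;
    # `pred_points` and `gt_points` are hypothetical tensors of shape (N, 3).
    #
    #     norm_fn = lambda t: t.norm(dim=-1)
    #     self.assertNormsClose(pred_points, gt_points, norm_fn, atol=1e-4)
    #
    # With norm_fn=torch.abs this reduces to assertClose.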

    def assertClose(
        self,
        input: TensorOrArray,
        other: TensorOrArray,
        *,
        rtol: float = 1e-05,
        atol: float = 1e-08,
        equal_nan: bool = False,
        msg: Optional[str] = None,
    ) -> None:
        """
        Verifies that two tensors or arrays have the same shape and are close
            given absolute and relative tolerance, i.e. checks
            `all(|input - other| <= atol + rtol * |other|)`;
            raises AssertionError otherwise.
        Args:
            input, other: two tensors or two arrays.
            rtol, atol, equal_nan: as for torch.allclose.
            msg: message in case the assertion is violated.
        Note:
            Optional arguments here are all keyword-only, to avoid confusion
            with msg arguments on other assert functions.
        """

        self.assertEqual(np.shape(input), np.shape(other))

        backend = torch if torch.is_tensor(input) else np
        close = backend.allclose(
            input, other, rtol=rtol, atol=atol, equal_nan=equal_nan
        )

        if close:
            return

        # handle bool case
        if backend == torch and input.dtype == torch.bool:
            diff = (input != other).float()
            ratio = diff
        elif backend == np and input.dtype == bool:
            diff = (input != other).astype(float)
            ratio = diff
        else:
            diff = backend.abs(input + 0.0 - other)
            ratio = diff / backend.abs(other)

        try_relative = (diff <= atol) | (backend.isfinite(ratio) & (ratio > 0))
        if try_relative.all():
            if backend == np:
                # Avoid a weirdness with zero dimensional arrays.
                ratio = np.array(ratio)
            ratio[diff <= atol] = 0
            extra = f" Max relative diff {ratio.max()}"
        else:
            extra = ""
        shape = tuple(input.shape)
        loc = np.unravel_index(int(diff.argmax()), shape)
        max_diff = diff.max()
        err = f"Not close. Max diff {max_diff}.{extra} Shape {shape}. At {loc}."
        if msg is not None:
            self.fail(f"{msg} {err}")
        self.fail(err)
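
    # Usage sketch (hypothetical `predicted` and `expected` tensors): the
    # failure message reports the max absolute diff, the max relative diff,
    # the shape and the offending index.
    #
    #     self.assertClose(torch.ones(2, 3), torch.ones(2, 3))           # passes
    #     self.assertClose(predicted, expected, atol=1e-4, msg="verts")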

    def assertConstant(
        self, input: TensorOrArray, value: Real, *, atol: float = 0
    ) -> None:
        """
        Asserts input is entirely filled with value.

        Args:
            input: tensor or array
            value: expected value
            atol: tolerance
        """
        mn, mx = input.min(), input.max()
        msg = f"values in range [{mn}, {mx}], not {value}, shape {input.shape}"
        if atol == 0:
            self.assertEqual(input.min(), value, msg=msg)
            self.assertEqual(input.max(), value, msg=msg)
        else:
            self.assertGreater(input.min(), value - atol, msg=msg)
            self.assertLess(input.max(), value + atol, msg=msg)
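
    # Usage sketch (the `mask` tensor is hypothetical): checks that every
    # element equals the given value, exactly or within atol.
    #
    #     self.assertConstant(torch.zeros(5, 5), 0)
    #     self.assertConstant(mask.float(), 1.0, atol=1e-6)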