# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os

import unittest

import torch
from omegaconf import DictConfig, OmegaConf
from pytorch3d.implicitron.models.generic_model import GenericModel
from pytorch3d.implicitron.models.renderer.base import EvaluationMode
from pytorch3d.implicitron.tools.config import expand_args_fields, get_default_args
from pytorch3d.renderer.cameras import look_at_view_transform, PerspectiveCameras


# The FB_TEST environment variable selects between two import paths for the
# shared test helpers (presumably an internal vs. open-source test harness —
# TODO confirm against the test runner configuration).
if os.environ.get("FB_TEST", False):
    from common_testing import get_pytorch3d_dir
else:
    from tests.common_testing import get_pytorch3d_dir

# Directory holding the implicitron_trainer yaml configs exercised by
# TestGenericModel.test_all_gm_configs.
IMPLICITRON_CONFIGS_DIR = (
    get_pytorch3d_dir() / "projects" / "implicitron_trainer" / "configs"
)


class TestGenericModel(unittest.TestCase):
    def setUp(self):
        # Fixed seed so random cameras/images are reproducible across runs.
        torch.manual_seed(42)

    def test_gm(self):
        # Simple forward and backward pass of the default GenericModel.
        device = torch.device("cuda:1")
        expand_args_fields(GenericModel)
        gm = GenericModel(render_image_height=80, render_image_width=80)
        gm.to(device)
        self._one_model_test(gm, device)

    def test_all_gm_configs(self):
        # Exercise every model configuration shipped in the implicitron_trainer
        # configs folder (single-sequence and multi-sequence repro configs),
        # skipping the shared "*_base.yaml" fragments.
        device = torch.device("cuda:0")
        patterns = ("repro_singleseq*.yaml", "repro_multiseq*.yaml")
        config_files = [
            path
            for pattern in patterns
            for path in IMPLICITRON_CONFIGS_DIR.glob(pattern)
            if not path.name.endswith("_base.yaml")
        ]

        for config_file in config_files:
            with self.subTest(name=config_file.stem):
                cfg = _load_model_config_from_yaml(str(config_file))
                gm = GenericModel(**cfg)
                gm.to(device)
                self._one_model_test(gm, device, eval_test=True, bw_test=True)

    def _one_model_test(
        self,
        model,
        device,
        n_train_cameras: int = 5,
        eval_test: bool = True,
        bw_test: bool = True,
    ):
        # Run a training forward pass (optionally followed by a backward pass)
        # on random inputs and, if requested, an evaluation forward pass.
        R, T = look_at_view_transform(azim=torch.rand(n_train_cameras) * 360)
        cameras = PerspectiveCameras(R=R, T=T, device=device)

        batch = n_train_cameras
        height, width = model.render_image_height, model.render_image_width

        # Random stand-ins for a real data batch.
        inputs = {
            "camera": cameras,
            "fg_probability": _random_input_tensor(batch, 1, height, width, True, device),
            "depth_map": _random_input_tensor(batch, 1, height, width, False, device)
            + 0.1,
            "mask_crop": _random_input_tensor(batch, 1, height, width, True, device),
            "sequence_name": ["sequence"] * batch,
            "image_rgb": _random_input_tensor(batch, 3, height, width, False, device),
        }

        # Training forward pass; the training objective must be finite.
        model.train()
        train_preds = model(**inputs, evaluation_mode=EvaluationMode.TRAINING)
        self.assertTrue(train_preds["objective"].isfinite().item())

        if bw_test:
            train_preds["objective"].backward()

        if eval_test:
            # Evaluation renders a single image at the configured resolution.
            model.eval()
            with torch.no_grad():
                eval_preds = model(**inputs, evaluation_mode=EvaluationMode.EVALUATION)
            self.assertEqual(
                eval_preds["images_render"].shape,
                (1, 3, model.render_image_height, model.render_image_width),
            )

    def test_idr(self):
        # Forward pass of GenericModel configured with the IDR renderer
        # and feature field.
        device = torch.device("cuda:1")
        args = get_default_args(GenericModel)
        args.renderer_class_type = "SignedDistanceFunctionRenderer"
        args.implicit_function_class_type = "IdrFeatureField"
        args.implicit_function_IdrFeatureField_args.n_harmonic_functions_xyz = 6

        gm = GenericModel(**args)
        gm.to(device)

        num_views = 2
        R, T = look_at_view_transform(azim=torch.rand(num_views) * 360)
        cameras = PerspectiveCameras(R=R, T=T, device=device)

        defaulted_args = {
            "depth_map": None,
            "mask_crop": None,
            "sequence_name": None,
        }

        target_image_rgb = torch.rand(
            (num_views, 3, gm.render_image_height, gm.render_image_width),
            device=device,
        )
        fg_probability = torch.rand(
            (num_views, 1, gm.render_image_height, gm.render_image_width),
            device=device,
        )
        train_preds = gm(
            camera=cameras,
            evaluation_mode=EvaluationMode.TRAINING,
            image_rgb=target_image_rgb,
            fg_probability=fg_probability,
            **defaulted_args,
        )
        self.assertGreater(train_preds["objective"].item(), 0)

    def test_viewpool(self):
        # Forward pass of GenericModel with view pooling enabled.
        device = torch.device("cuda:1")
        args = get_default_args(GenericModel)
        args.view_pooler_enabled = True
        args.image_feature_extractor_ResNetFeatureExtractor_args.add_masks = False
        gm = GenericModel(**args)
        gm.to(device)

        num_views = 2
        R, T = look_at_view_transform(azim=torch.rand(num_views) * 360)
        cameras = PerspectiveCameras(R=R, T=T, device=device)

        defaulted_args = {
            "fg_probability": None,
            "depth_map": None,
            "mask_crop": None,
        }

        target_image_rgb = torch.rand(
            (num_views, 3, gm.render_image_height, gm.render_image_width),
            device=device,
        )
        train_preds = gm(
            camera=cameras,
            evaluation_mode=EvaluationMode.TRAINING,
            image_rgb=target_image_rgb,
            sequence_name=["a"] * num_views,
            **defaulted_args,
        )
        self.assertGreater(train_preds["objective"].item(), 0)

def _random_input_tensor(
    N: int,
    C: int,
    H: int,
    W: int,
    is_binary: bool,
    device: torch.device,
) -> torch.Tensor:
    T = torch.rand(N, C, H, W, device=device)
    if is_binary:
        T = (T > 0.5).float()
    return T


def _load_model_config_from_yaml(config_path, strict=True) -> DictConfig:
    """
    Build a GenericModel config by applying the yaml file at `config_path`
    (and, recursively, its "defaults") on top of the model's default args.

    Note: the `strict` parameter is accepted for compatibility but not used.
    """
    base_cfg = get_default_args(GenericModel)
    return _load_model_config_from_yaml_rec(base_cfg, config_path)


def _load_model_config_from_yaml_rec(cfg: DictConfig, config_path: str) -> DictConfig:
    """
    Recursively merge the "generic_model_args" section of the yaml file at
    `config_path` (and of every file in its hydra-style "defaults" list) into
    `cfg`, and return the merged config.

    Defaults are resolved relative to the directory of `config_path`; each
    default file is applied first, then this file's own model args on top.
    """
    cfg_loaded = OmegaConf.load(config_path)
    if "generic_model_args" in cfg_loaded:
        cfg_model_loaded = cfg_loaded.generic_model_args
    else:
        # This file contributes no model args of its own (e.g. pure defaults list).
        cfg_model_loaded = None
    defaults = cfg_loaded.pop("defaults", None)
    if defaults is not None:
        for default_name in defaults:
            # "_self_" and "default_config" are hydra markers, not files to load.
            if default_name in ("_self_", "default_config"):
                continue
            default_name = os.path.splitext(default_name)[0]
            defpath = os.path.join(os.path.dirname(config_path), default_name + ".yaml")
            cfg = _load_model_config_from_yaml_rec(cfg, defpath)
            # NOTE(review): this merge sits inside the loop, so cfg_model_loaded
            # is re-merged after each default. That is idempotent if the values
            # don't conflict, but it may have been intended to run once after
            # the loop — confirm before changing.
            if cfg_model_loaded is not None:
                cfg = OmegaConf.merge(cfg, cfg_model_loaded)
    elif cfg_model_loaded is not None:
        cfg = OmegaConf.merge(cfg, cfg_model_loaded)
    return cfg