test_forward_pass.py 6.09 KB
Newer Older
Jeremy Reizenstein's avatar
Jeremy Reizenstein committed
1
2
3
4
5
6
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

7
import os
Jeremy Reizenstein's avatar
Jeremy Reizenstein committed
8
9
10
import unittest

import torch
11
from omegaconf import DictConfig, OmegaConf
12
from pytorch3d.implicitron.models.generic_model import GenericModel
Jeremy Reizenstein's avatar
Jeremy Reizenstein committed
13
from pytorch3d.implicitron.models.renderer.base import EvaluationMode
Jeremy Reizenstein's avatar
Jeremy Reizenstein committed
14
from pytorch3d.implicitron.tools.config import expand_args_fields, get_default_args
15
from pytorch3d.renderer.cameras import look_at_view_transform, PerspectiveCameras
Jeremy Reizenstein's avatar
Jeremy Reizenstein committed
16
17


18
19
20
21
22
23
24
25
26
27
# Pick the import path for the shared testing helpers: when FB_TEST is set
# (presumably Meta-internal test runs — confirm against the internal runner),
# `common_testing` is importable directly; otherwise use the OSS `tests.` prefix.
if os.environ.get("FB_TEST", False):
    from common_testing import get_pytorch3d_dir
else:
    from tests.common_testing import get_pytorch3d_dir

# Directory containing the yaml configs shipped with the implicitron trainer;
# test_all_gm_configs globs model configs from here.
IMPLICITRON_CONFIGS_DIR = (
    get_pytorch3d_dir() / "projects" / "implicitron_trainer" / "configs"
)


Jeremy Reizenstein's avatar
Jeremy Reizenstein committed
28
class TestGenericModel(unittest.TestCase):
    """Forward/backward smoke tests for implicitron's GenericModel."""

    def setUp(self):
        # Fixed seed so the random cameras and images are reproducible.
        torch.manual_seed(42)

    def test_gm(self):
        # Simple test of a forward and backward pass of the default GenericModel.
        device = torch.device("cuda:1")
        expand_args_fields(GenericModel)
        model = GenericModel()
        model.to(device)
        self._one_model_test(model, device)

    def test_all_gm_configs(self):
        # Tests all model settings in the implicitron_trainer config folder,
        # skipping the *_base.yaml files which are not standalone configs.
        device = torch.device("cuda:0")
        config_files = [
            path
            for pattern in ("repro_singleseq*.yaml", "repro_multiseq*.yaml")
            for path in IMPLICITRON_CONFIGS_DIR.glob(pattern)
            if not path.name.endswith("_base.yaml")
        ]

        for config_file in config_files:
            with self.subTest(name=config_file.stem):
                cfg = _load_model_config_from_yaml(str(config_file))
                model = GenericModel(**cfg)
                model.to(device)
                self._one_model_test(model, device, eval_test=True)

    def _one_model_test(
        self,
        model,
        device,
        n_train_cameras: int = 5,
        eval_test: bool = True,
    ):
        """Run one training forward+backward pass on `model` with random
        inputs on `device`; assert the objective is positive. When
        `eval_test`, also run an evaluation pass under no_grad and check
        the rendered image shape.
        """
        rot, trans = look_at_view_transform(azim=torch.rand(n_train_cameras) * 360)
        cameras = PerspectiveCameras(R=rot, T=trans, device=device)

        batch = n_train_cameras
        height = model.render_image_height
        width = model.render_image_width

        random_args = {
            "camera": cameras,
            "fg_probability": _random_input_tensor(batch, 1, height, width, True, device),
            # Offset keeps every depth strictly positive.
            "depth_map": _random_input_tensor(batch, 1, height, width, False, device) + 0.1,
            "mask_crop": _random_input_tensor(batch, 1, height, width, True, device),
            "sequence_name": ["sequence"] * batch,
            "image_rgb": _random_input_tensor(batch, 3, height, width, False, device),
        }

        # Training forward pass, then backprop through the objective.
        model.train()
        train_preds = model(
            **random_args,
            evaluation_mode=EvaluationMode.TRAINING,
        )
        self.assertGreater(train_preds["objective"].item(), 0)
        train_preds["objective"].backward()

        if eval_test:
            model.eval()
            with torch.no_grad():
                eval_preds = model(
                    **random_args,
                    evaluation_mode=EvaluationMode.EVALUATION,
                )
                self.assertEqual(
                    eval_preds["images_render"].shape,
                    (1, 3, model.render_image_height, model.render_image_width),
                )

    def test_idr(self):
        # Forward pass of GenericModel configured with the IDR renderer
        # and implicit function.
        device = torch.device("cuda:1")
        args = get_default_args(GenericModel)
        args.renderer_class_type = "SignedDistanceFunctionRenderer"
        args.implicit_function_class_type = "IdrFeatureField"
        args.implicit_function_IdrFeatureField_args.n_harmonic_functions_xyz = 6

        model = GenericModel(**args)
        model.to(device)

        n_train_cameras = 2
        rot, trans = look_at_view_transform(azim=torch.rand(n_train_cameras) * 360)
        cameras = PerspectiveCameras(R=rot, T=trans, device=device)

        # Required model inputs which are allowed to be None for this test.
        defaulted_args = {
            "depth_map": None,
            "mask_crop": None,
            "sequence_name": None,
        }

        target_image_rgb = torch.rand(
            (n_train_cameras, 3, model.render_image_height, model.render_image_width),
            device=device,
        )
        fg_probability = torch.rand(
            (n_train_cameras, 1, model.render_image_height, model.render_image_width),
            device=device,
        )
        train_preds = model(
            camera=cameras,
            evaluation_mode=EvaluationMode.TRAINING,
            image_rgb=target_image_rgb,
            fg_probability=fg_probability,
            **defaulted_args,
        )
        self.assertGreater(train_preds["objective"].item(), 0)
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181


def _random_input_tensor(
    N: int,
    C: int,
    H: int,
    W: int,
    is_binary: bool,
    device: torch.device,
) -> torch.Tensor:
    T = torch.rand(N, C, H, W, device=device)
    if is_binary:
        T = (T > 0.5).float()
    return T


def _load_model_config_from_yaml(config_path, strict=True) -> DictConfig:
    """Build a GenericModel config by layering the yaml at `config_path`
    (including its hydra-style `defaults`) over GenericModel's defaults.

    Note: `strict` is accepted for interface compatibility but unused.
    """
    base_cfg = get_default_args(GenericModel)
    return _load_model_config_from_yaml_rec(base_cfg, config_path)


def _load_model_config_from_yaml_rec(cfg: DictConfig, config_path: str) -> DictConfig:
    """Recursively merge the model config at `config_path` into `cfg`.

    Hydra-style `defaults` entries are resolved first (depth-first, paths
    relative to `config_path`), then the file's own `generic_model_args`
    are merged last so they override everything inherited.
    """
    cfg_loaded = OmegaConf.load(config_path)
    if "generic_model_args" in cfg_loaded:
        cfg_model_loaded = cfg_loaded.generic_model_args
    else:
        cfg_model_loaded = None
    defaults = cfg_loaded.pop("defaults", None)
    if defaults is not None:
        for default_name in defaults:
            # "_self_" and "default_config" are hydra bookkeeping entries,
            # not yaml files on disk.
            if default_name in ("_self_", "default_config"):
                continue
            default_name = os.path.splitext(default_name)[0]
            defpath = os.path.join(os.path.dirname(config_path), default_name + ".yaml")
            cfg = _load_model_config_from_yaml_rec(cfg, defpath)
    # Merge this file's own args once, after all defaults, so they take
    # precedence. (Previously this merge lived inside the loop above and
    # was skipped entirely when every `defaults` entry was filtered out,
    # silently dropping the file's generic_model_args.)
    if cfg_model_loaded is not None:
        cfg = OmegaConf.merge(cfg, cfg_model_loaded)
    return cfg