Unverified Commit d182a6ad authored by Suraj Patil, committed by GitHub

Add model tests

parents 9c96682a 12da0fe1
@@ -287,14 +287,14 @@ class UNetModel(ModelMixin, ConfigMixin):
         self.norm_out = Normalize(block_in)
         self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)

-    def forward(self, x, t):
+    def forward(self, x, timesteps):
         assert x.shape[2] == x.shape[3] == self.resolution

-        if not torch.is_tensor(t):
-            t = torch.tensor([t], dtype=torch.long, device=x.device)
+        if not torch.is_tensor(timesteps):
+            timesteps = torch.tensor([timesteps], dtype=torch.long, device=x.device)

         # timestep embedding
-        temb = get_timestep_embedding(t, self.ch)
+        temb = get_timestep_embedding(timesteps, self.ch)
         temb = self.temb.dense[0](temb)
         temb = nonlinearity(temb)
         temb = self.temb.dense[1](temb)
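
For reference, a minimal usage sketch of the renamed timesteps argument (not part of this commit; the import path and the input channel count are assumptions, while the constructor arguments mirror the dummy config used in the new tests below):

    import torch
    from diffusers import UNetModel  # import path assumed; not shown in this diff

    model = UNetModel(ch=32, ch_mult=(1, 2), num_res_blocks=2, attn_resolutions=(16,), resolution=32)
    model.eval()

    # 3 input channels is an assumption; the model's default in_channels is not shown here.
    x = torch.randn(1, 3, 32, 32)

    with torch.no_grad():
        out_int = model(x, timesteps=10)                     # plain int: converted to a LongTensor internally
        out_tensor = model(x, timesteps=torch.tensor([10]))  # an explicit tensor behaves the same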
...
@@ -236,8 +236,12 @@ def english_cleaners(text):
     text = collapse_whitespace(text)
     return text


-_inflect = inflect.engine()
+try:
+    _inflect = inflect.engine()
+except:
+    print("inflect is not installed")
+    _inflect = None
 _comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])")
 _decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)")
 _pounds_re = re.compile(r"£([0-9\,]*[0-9]+)")
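
Since _inflect can now be None when the inflect package is missing, code that uses it presumably needs a guard; a minimal sketch of such a fallback (the helper name _expand_number is hypothetical and not shown in this diff):

    def _expand_number(m):
        # Hypothetical fallback: if inflect could not be imported, return the
        # matched digits unchanged instead of calling a method on None.
        if _inflect is None:
            return m.group(0)
        return _inflect.number_to_words(m.group(0), andword="")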
...
@@ -14,8 +14,10 @@
 # limitations under the License.

+import inspect
 import tempfile
 import unittest

+import numpy as np
 import torch
@@ -82,7 +84,108 @@ class ConfigTester(unittest.TestCase):
         assert config == new_config


-class ModelTesterMixin(unittest.TestCase):
+class ModelTesterMixin:
+    def test_from_pretrained_save_pretrained(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        model.to(torch_device)
+        model.eval()
+
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            model.save_pretrained(tmpdirname)
+            new_model = UNetModel.from_pretrained(tmpdirname)
+            new_model.to(torch_device)
+
+        with torch.no_grad():
+            image = model(**inputs_dict)
+            new_image = new_model(**inputs_dict)
+
+        max_diff = (image - new_image).abs().sum().item()
+        self.assertLessEqual(max_diff, 1e-5, "Models give different forward passes")
+
+    def test_determinism(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        model.to(torch_device)
+        model.eval()
+
+        with torch.no_grad():
+            first = model(**inputs_dict)
+            second = model(**inputs_dict)
+
+        out_1 = first.cpu().numpy()
+        out_2 = second.cpu().numpy()
+        out_1 = out_1[~np.isnan(out_1)]
+        out_2 = out_2[~np.isnan(out_2)]
+        max_diff = np.amax(np.abs(out_1 - out_2))
+        self.assertLessEqual(max_diff, 1e-5)
+
+    def test_output(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        model.to(torch_device)
+        model.eval()
+
+        with torch.no_grad():
+            output = model(**inputs_dict)
+
+        self.assertIsNotNone(output)
+        expected_shape = inputs_dict["x"].shape
+        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
+
+    def test_forward_signature(self):
+        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        signature = inspect.signature(model.forward)
+        # signature.parameters is an OrderedDict => so arg_names order is deterministic
+        arg_names = [*signature.parameters.keys()]
+
+        expected_arg_names = ["x", "timesteps"]
+        self.assertListEqual(arg_names[:2], expected_arg_names)
+
+    def test_model_from_config(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        model.to(torch_device)
+        model.eval()
+
+        # test if the model can be loaded from the config
+        # and has all the expected shapes
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            model.save_config(tmpdirname)
+            new_model = self.model_class.from_config(tmpdirname)
+            new_model.to(torch_device)
+            new_model.eval()
+
+        # check if all parameter shapes are the same
+        for param_name in model.state_dict().keys():
+            param_1 = model.state_dict()[param_name]
+            param_2 = new_model.state_dict()[param_name]
+            self.assertEqual(param_1.shape, param_2.shape)
+
+        with torch.no_grad():
+            output_1 = model(**inputs_dict)
+            output_2 = new_model(**inputs_dict)
+
+        self.assertEqual(output_1.shape, output_2.shape)
+
+    def test_training(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        model.to(torch_device)
+        model.train()
+
+        output = model(**inputs_dict)
+        noise = torch.randn(inputs_dict["x"].shape).to(torch_device)
+        loss = torch.nn.functional.mse_loss(output, noise)
+        loss.backward()
+
+
+class UnetModelTests(ModelTesterMixin, unittest.TestCase):
+    model_class = UNetModel
+
     @property
     def dummy_input(self):
         batch_size = 4
@@ -92,31 +195,51 @@ class ModelTesterMixin(unittest.TestCase):
         noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
         time_step = torch.tensor([10]).to(torch_device)

-        return (noise, time_step)
+        return {"x": noise, "timesteps": time_step}
+
+    def prepare_init_args_and_inputs_for_common(self):
+        init_dict = {
+            "ch": 32,
+            "ch_mult": (1, 2),
+            "num_res_blocks": 2,
+            "attn_resolutions": (16,),
+            "resolution": 32,
+        }
+        inputs_dict = self.dummy_input
+        return init_dict, inputs_dict
+
+    def test_from_pretrained_hub(self):
+        model, loading_info = UNetModel.from_pretrained("fusing/ddpm_dummy", output_loading_info=True)
+        self.assertIsNotNone(model)
+        self.assertEqual(len(loading_info["missing_keys"]), 0)

-    def test_from_pretrained_save_pretrained(self):
-        model = UNetModel(ch=32, ch_mult=(1, 2), num_res_blocks=2, attn_resolutions=(16,), resolution=32)
         model.to(torch_device)
+        image = model(**self.dummy_input)

-        with tempfile.TemporaryDirectory() as tmpdirname:
-            model.save_pretrained(tmpdirname)
-            new_model = UNetModel.from_pretrained(tmpdirname)
-            new_model.to(torch_device)
+        assert image is not None, "Make sure output is not None"

-        dummy_input = self.dummy_input
+    def test_output_pretrained(self):
+        model = UNetModel.from_pretrained("fusing/ddpm_dummy")
+        model.eval()

-        image = model(*dummy_input)
-        new_image = new_model(*dummy_input)
+        torch.manual_seed(0)
+        if torch.cuda.is_available():
+            torch.cuda.manual_seed_all(0)

-        assert (image - new_image).abs().sum() < 1e-5, "Models don't give the same forward pass"
+        noise = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution)
+        print(noise.shape)
+        time_step = torch.tensor([10])

-    def test_from_pretrained_hub(self):
-        model = UNetModel.from_pretrained("fusing/ddpm_dummy")
-        model.to(torch_device)
+        with torch.no_grad():
+            output = model(noise, time_step)

-        image = model(*self.dummy_input)
+        output_slice = output[0, -1, -3:, -3:].flatten()
+        # fmt: off
+        expected_output_slice = torch.tensor([ 0.2891, -0.1899, 0.2595, -0.6214, 0.0968, -0.2622, 0.4688, 0.1311, 0.0053])
+        # fmt: on
+        print(output_slice)
+        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))

-        assert image is not None, "Make sure output is not None"

 class PipelineTesterMixin(unittest.TestCase):
...
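
To run the new model tests locally, something like the following should work (invoked from the repository root; the exact test file layout is not shown in this diff):

    # hypothetical local invocation; pytest will collect UnetModelTests wherever the test file lives
    import pytest
    pytest.main(["-v", "-k", "UnetModelTests"])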