Commit c9a48a52 authored by limm's avatar limm

add test code

parent b7536f78
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmgen.models.gans import StaticUnconditionalGAN
class TestWGANGP:
@classmethod
def setup_class(cls):
cls.generator_cfg = dict(
type='WGANGPGenerator', noise_size=128, out_scale=128)
cls.discriminator_cfg = dict(
type='WGANGPDiscriminator',
in_channel=3,
in_scale=128,
conv_module_cfg=dict(
conv_cfg=None,
kernel_size=3,
stride=1,
padding=1,
bias=True,
act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
norm_cfg=dict(type='GN'),
order=('conv', 'norm', 'act')))
cls.gan_loss = dict(type='GANLoss', gan_type='wgan')
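        # the gradient-penalty loss reads its inputs through data_info,
        # which maps the loss arguments onto keys of the shared outputs
        # dict (the discriminator plus real/fake image batches)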
cls.disc_auxiliary_loss = dict(
type='GradientPenaltyLoss',
loss_weight=10,
norm_mode='pixel',
data_info=dict(
discriminator='disc',
real_data='real_imgs',
fake_data='fake_imgs'))
cls.train_cfg = None
def test_wgangp_cpu(self):
# test default config
wgangp = StaticUnconditionalGAN(
self.generator_cfg,
self.discriminator_cfg,
self.gan_loss,
disc_auxiliary_loss=self.disc_auxiliary_loss,
train_cfg=self.train_cfg)
# test sample from noise
outputs = wgangp.sample_from_noise(None, num_batches=2)
assert outputs.shape == (2, 3, 128, 128)
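        # with return_noise=True a dict is returned instead of a bare
        # tensor, carrying the fake images together with the noise used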
outputs = wgangp.sample_from_noise(
None, num_batches=2, return_noise=True, sample_model='orig')
assert outputs['fake_img'].shape == (2, 3, 128, 128)
# test train step
data = torch.randn((2, 3, 128, 128))
data_input = dict(real_img=data)
optimizer_g = torch.optim.SGD(wgangp.generator.parameters(), lr=0.01)
optimizer_d = torch.optim.SGD(
wgangp.discriminator.parameters(), lr=0.01)
optim_dict = dict(generator=optimizer_g, discriminator=optimizer_d)
model_outputs = wgangp.train_step(data_input, optim_dict)
assert 'results' in model_outputs
assert 'log_vars' in model_outputs
assert model_outputs['num_samples'] == 2
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_wgangp_cuda(self):
# test default config
wgangp = StaticUnconditionalGAN(
self.generator_cfg,
self.discriminator_cfg,
self.gan_loss,
disc_auxiliary_loss=self.disc_auxiliary_loss,
train_cfg=self.train_cfg).cuda()
# test sample from noise
outputs = wgangp.sample_from_noise(None, num_batches=2)
assert outputs.shape == (2, 3, 128, 128)
outputs = wgangp.sample_from_noise(
None, num_batches=2, return_noise=True, sample_model='orig')
assert outputs['fake_img'].shape == (2, 3, 128, 128)
# test train step
data = torch.randn((2, 3, 128, 128)).cuda()
data_input = dict(real_img=data)
optimizer_g = torch.optim.SGD(wgangp.generator.parameters(), lr=0.01)
optimizer_d = torch.optim.SGD(
wgangp.discriminator.parameters(), lr=0.01)
optim_dict = dict(generator=optimizer_g, discriminator=optimizer_d)
model_outputs = wgangp.train_step(data_input, optim_dict)
assert 'results' in model_outputs
assert 'log_vars' in model_outputs
assert model_outputs['num_samples'] == 2
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmgen.models.architectures import IDLossModel
# yapf:disable
from mmgen.models.architectures.arcface.model_irse import Backbone
# yapf:enable
class TestArcFace:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
input_size=224,
num_layers=50,
mode='ir',
drop_ratio=0.4,
affine=True)
def test_arcface_cpu(self):
model = Backbone(**self.default_cfg)
x = torch.randn((2, 3, 224, 224))
y = model(x)
assert y.shape == (2, 512)
# test different input size
cfg = deepcopy(self.default_cfg)
cfg.update(dict(input_size=112))
model = Backbone(**cfg)
x = torch.randn((2, 3, 112, 112))
y = model(x)
assert y.shape == (2, 512)
# test different num_layers
cfg = deepcopy(self.default_cfg)
        cfg.update(dict(num_layers=100))
model = Backbone(**cfg)
x = torch.randn((2, 3, 224, 224))
y = model(x)
assert y.shape == (2, 512)
# test different mode
cfg = deepcopy(self.default_cfg)
cfg.update(dict(mode='ir_se'))
model = Backbone(**cfg)
x = torch.randn((2, 3, 224, 224))
y = model(x)
assert y.shape == (2, 512)
# test different drop ratio
cfg = deepcopy(self.default_cfg)
cfg.update(dict(drop_ratio=0.8))
model = Backbone(**cfg)
x = torch.randn((2, 3, 224, 224))
y = model(x)
assert y.shape == (2, 512)
# test affine=False
cfg = deepcopy(self.default_cfg)
cfg.update(dict(affine=False))
model = Backbone(**cfg)
x = torch.randn((2, 3, 224, 224))
y = model(x)
assert y.shape == (2, 512)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_arcface_cuda(self):
model = Backbone(**self.default_cfg).cuda()
x = torch.randn((2, 3, 224, 224)).cuda()
y = model(x)
assert y.shape == (2, 512)
# test different input size
cfg = deepcopy(self.default_cfg)
cfg.update(dict(input_size=112))
model = Backbone(**cfg).cuda()
x = torch.randn((2, 3, 112, 112)).cuda()
y = model(x)
assert y.shape == (2, 512)
# test different num_layers
cfg = deepcopy(self.default_cfg)
        cfg.update(dict(num_layers=100))
model = Backbone(**cfg).cuda()
x = torch.randn((2, 3, 224, 224)).cuda()
y = model(x)
assert y.shape == (2, 512)
# test different mode
cfg = deepcopy(self.default_cfg)
cfg.update(dict(mode='ir_se'))
model = Backbone(**cfg).cuda()
x = torch.randn((2, 3, 224, 224)).cuda()
y = model(x)
assert y.shape == (2, 512)
# test different drop ratio
cfg = deepcopy(self.default_cfg)
cfg.update(dict(drop_ratio=0.8))
model = Backbone(**cfg).cuda()
x = torch.randn((2, 3, 224, 224)).cuda()
y = model(x)
assert y.shape == (2, 512)
# test affine=False
cfg = deepcopy(self.default_cfg)
cfg.update(dict(affine=False))
model = Backbone(**cfg).cuda()
x = torch.randn((2, 3, 224, 224)).cuda()
y = model(x)
assert y.shape == (2, 512)
# test loss model
        id_loss_model = IDLossModel().cuda()
x1 = torch.randn((2, 3, 224, 224)).cuda()
x2 = torch.randn((2, 3, 224, 224)).cuda()
y, _ = id_loss_model(pred=x1, gt=x2)
assert y >= 0
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from functools import partial
import pytest
import torch
from mmgen.models import build_module
# yapf:disable
from mmgen.models.architectures.biggan import (BigGANConditionBN,
BigGANDiscResBlock,
BigGANDiscriminator,
BigGANGenerator,
BigGANGenResBlock,
SelfAttentionBlock)
# yapf:enable
class TestBigGANConditionBN:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
type='BigGANConditionBN',
num_features=64,
linear_input_channels=80)
cls.x = torch.randn(2, 64, 32, 32)
cls.y = torch.randn(2, 80)
cls.label = torch.randint(0, 80, (2, ))
def test_biggan_condition_bn(self):
# test default setting
module = build_module(self.default_cfg)
assert isinstance(module, BigGANConditionBN)
out = module(self.x, self.y)
assert out.shape == (2, 64, 32, 32)
# test input_is_label
cfg = deepcopy(self.default_cfg)
cfg.update(dict(input_is_label=True))
module = build_module(cfg)
out = module(self.x, self.label)
assert out.shape == (2, 64, 32, 32)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg)
out = module(self.x, self.y)
assert out.shape == (2, 64, 32, 32)
# test not-implemented sn-style
with pytest.raises(NotImplementedError):
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='tero'))
module = build_module(cfg)
out = module(self.x, self.y)
assert out.shape == (2, 64, 32, 32)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_condition_bn_cuda(self):
# test default setting
module = build_module(self.default_cfg).cuda()
assert isinstance(module, BigGANConditionBN)
out = module(self.x.cuda(), self.y.cuda())
assert out.shape == (2, 64, 32, 32)
# test input_is_label
cfg = deepcopy(self.default_cfg)
cfg.update(dict(input_is_label=True))
module = build_module(cfg).cuda()
out = module(self.x.cuda(), self.label.cuda())
assert out.shape == (2, 64, 32, 32)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg).cuda()
out = module(self.x.cuda(), self.y.cuda())
assert out.shape == (2, 64, 32, 32)
# test not-implemented sn-style
with pytest.raises(NotImplementedError):
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='tero'))
module = build_module(cfg).cuda()
out = module(self.x.cuda(), self.y.cuda())
assert out.shape == (2, 64, 32, 32)
class TestSelfAttentionBlock:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(type='SelfAttentionBlock', in_channels=16)
cls.x = torch.randn(2, 16, 8, 8)
def test_self_attention_block(self):
# test default setting
module = build_module(self.default_cfg)
assert isinstance(module, SelfAttentionBlock)
out = module(self.x)
assert out.shape == (2, 16, 8, 8)
# test different in_channels
cfg = deepcopy(self.default_cfg)
cfg.update(dict(in_channels=10))
module = build_module(cfg)
x = torch.randn(2, 10, 8, 8)
out = module(x)
assert out.shape == (2, 10, 8, 8)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg)
out = module(self.x)
assert out.shape == (2, 16, 8, 8)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_self_attention_block_cuda(self):
# test default setting
module = build_module(self.default_cfg).cuda()
assert isinstance(module, SelfAttentionBlock)
out = module(self.x.cuda())
assert out.shape == (2, 16, 8, 8)
# test different in_channels
cfg = deepcopy(self.default_cfg)
cfg.update(dict(in_channels=10))
module = build_module(cfg).cuda()
x = torch.randn(2, 10, 8, 8).cuda()
out = module(x)
assert out.shape == (2, 10, 8, 8)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg).cuda()
out = module(self.x.cuda())
assert out.shape == (2, 16, 8, 8)
class TestBigGANGenResBlock:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
type='BigGANGenResBlock',
in_channels=32,
out_channels=16,
dim_after_concat=100,
act_cfg=dict(type='ReLU'),
upsample_cfg=dict(type='nearest', scale_factor=2),
sn_eps=1e-6,
with_spectral_norm=True,
input_is_label=False,
auto_sync_bn=True)
cls.x = torch.randn(2, 32, 8, 8)
cls.y = torch.randn(2, 100)
cls.label = torch.randint(0, 100, (2, ))
def test_biggan_gen_res_block(self):
# test default setting
module = build_module(self.default_cfg)
assert isinstance(module, BigGANGenResBlock)
out = module(self.x, self.y)
assert out.shape == (2, 16, 16, 16)
# test without upsample
cfg = deepcopy(self.default_cfg)
cfg.update(dict(upsample_cfg=None))
module = build_module(cfg)
out = module(self.x, self.y)
assert out.shape == (2, 16, 8, 8)
# test input_is_label
cfg = deepcopy(self.default_cfg)
cfg.update(dict(input_is_label=True))
module = build_module(cfg)
out = module(self.x, self.label)
assert out.shape == (2, 16, 16, 16)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg)
out = module(self.x, self.y)
assert out.shape == (2, 16, 16, 16)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_gen_res_block_cuda(self):
# test default setting
module = build_module(self.default_cfg).cuda()
assert isinstance(module, BigGANGenResBlock)
out = module(self.x.cuda(), self.y.cuda())
assert out.shape == (2, 16, 16, 16)
# test without upsample
cfg = deepcopy(self.default_cfg)
cfg.update(dict(upsample_cfg=None))
module = build_module(cfg).cuda()
out = module(self.x.cuda(), self.y.cuda())
assert out.shape == (2, 16, 8, 8)
# test input_is_label
cfg = deepcopy(self.default_cfg)
cfg.update(dict(input_is_label=True))
module = build_module(cfg).cuda()
out = module(self.x.cuda(), self.label.cuda())
assert out.shape == (2, 16, 16, 16)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg).cuda()
out = module(self.x.cuda(), self.y.cuda())
assert out.shape == (2, 16, 16, 16)
class TestBigGANDiscResBlock:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
type='BigGANDiscResBlock',
in_channels=32,
out_channels=64,
act_cfg=dict(type='ReLU', inplace=False),
sn_eps=1e-6,
with_downsample=True,
with_spectral_norm=True,
is_head_block=False)
cls.x = torch.randn(2, 32, 16, 16)
def test_biggan_disc_res_block(self):
# test default setting
module = build_module(self.default_cfg)
assert isinstance(module, BigGANDiscResBlock)
out = module(self.x)
assert out.shape == (2, 64, 8, 8)
        # test with_downsample=False
cfg = deepcopy(self.default_cfg)
cfg.update(dict(with_downsample=False))
module = build_module(cfg)
out = module(self.x)
assert out.shape == (2, 64, 16, 16)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg)
out = module(self.x)
assert out.shape == (2, 64, 8, 8)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_disc_res_block_cuda(self):
# test default setting
module = build_module(self.default_cfg).cuda()
assert isinstance(module, BigGANDiscResBlock)
out = module(self.x.cuda())
assert out.shape == (2, 64, 8, 8)
        # test with_downsample=False
cfg = deepcopy(self.default_cfg)
cfg.update(dict(with_downsample=False))
module = build_module(cfg).cuda()
out = module(self.x.cuda())
assert out.shape == (2, 64, 16, 16)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg).cuda()
out = module(self.x.cuda())
assert out.shape == (2, 64, 8, 8)
class TestBigGANGenerator(object):
@classmethod
def setup_class(cls):
cls.noise = torch.randn((3, 120))
num_classes = 1000
cls.label = torch.randint(0, num_classes, (3, ))
cls.default_config = dict(
type='BigGANGenerator',
output_scale=128,
num_classes=num_classes,
base_channels=4)
def test_biggan_generator(self):
# test default setting with builder
g = build_module(self.default_config)
assert isinstance(g, BigGANGenerator)
res = g(self.noise, self.label)
assert res.shape == (3, 3, 128, 128)
# test 'return_noise'
res = g(self.noise, self.label, return_noise=True)
assert res['fake_img'].shape == (3, 3, 128, 128)
assert res['noise_batch'].shape == (3, 120)
assert res['label'].shape == (3, )
res = g(None, None, num_batches=3, return_noise=True)
assert res['fake_img'].shape == (3, 3, 128, 128)
assert res['noise_batch'].shape == (3, 120)
assert res['label'].shape == (3, )
# test callable
noise = torch.randn
label = partial(torch.randint, 0, 1000)
res = g(noise, label, num_batches=2)
assert res.shape == (2, 3, 128, 128)
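        # noise/label may also be callables; they are invoked lazily and
        # num_batches tells the generator how many samples to draw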
# test different output scale
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=256))
g = build_module(cfg)
noise = torch.randn((3, 119))
res = g(noise, self.label)
assert res.shape == (3, 3, 256, 256)
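        # note: with split_noise the latent is divided evenly across the
        # blocks, so the expected noise size depends on output_scale
        # (119 at 256 vs 120 at 128)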
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 256, 256)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=512))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 512, 512)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=64))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 64, 64)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=32))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 32, 32)
# test with `split_noise=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(split_noise=False))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(dict(num_classes=0, with_shared_embedding=False))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test no shared embedding
cfg = deepcopy(self.default_config)
cfg.update(dict(with_shared_embedding=False, split_noise=False))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_generator_cuda(self):
# test default setting with builder
g = build_module(self.default_config).cuda()
assert isinstance(g, BigGANGenerator)
res = g(self.noise.cuda(), self.label.cuda())
assert res.shape == (3, 3, 128, 128)
# test 'return_noise'
res = g(self.noise.cuda(), self.label.cuda(), return_noise=True)
assert res['fake_img'].shape == (3, 3, 128, 128)
assert res['noise_batch'].shape == (3, 120)
assert res['label'].shape == (3, )
res = g(None, None, num_batches=3, return_noise=True)
assert res['fake_img'].shape == (3, 3, 128, 128)
assert res['noise_batch'].shape == (3, 120)
assert res['label'].shape == (3, )
# test callable
noise = torch.randn
label = partial(torch.randint, 0, 1000)
res = g(noise, label, num_batches=2)
assert res.shape == (2, 3, 128, 128)
# test different output scale
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=256))
g = build_module(cfg).cuda()
noise = torch.randn((3, 119)).cuda()
res = g(noise, self.label.cuda())
assert res.shape == (3, 3, 256, 256)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 256, 256)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=512))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 512, 512)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=64))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 64, 64)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=32))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 32, 32)
# test with `split_noise=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(split_noise=False))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(dict(num_classes=0, with_shared_embedding=False))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test no shared embedding
cfg = deepcopy(self.default_config)
cfg.update(dict(with_shared_embedding=False, split_noise=False))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
        g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
class TestBigGANDiscriminator(object):
@classmethod
def setup_class(cls):
num_classes = 1000
cls.default_config = dict(
type='BigGANDiscriminator',
input_scale=128,
num_classes=num_classes,
base_channels=8)
cls.x = torch.randn((2, 3, 128, 128))
cls.label = torch.randint(0, num_classes, (2, ))
def test_biggan_discriminator(self):
# test default settings
d = build_module(self.default_config)
assert isinstance(d, BigGANDiscriminator)
y = d(self.x, self.label)
assert y.shape == (2, 1)
# test different init types
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='N02'))
d = build_module(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='xavier'))
d = build_module(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(dict(num_classes=0))
d = build_module(cfg)
y = d(self.x, None)
assert y.shape == (2, 1)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
d = build_module(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
d = build_module(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_discriminator_cuda(self):
# test default settings
d = build_module(self.default_config).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# test different init types
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='N02'))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='xavier'))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(dict(num_classes=0))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), None)
assert y.shape == (2, 1)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from functools import partial
import pytest
import torch
from mmgen.models import build_module
# yapf:disable
from mmgen.models.architectures.biggan import (BigGANDeepDiscResBlock,
BigGANDeepDiscriminator,
BigGANDeepGenerator,
BigGANDeepGenResBlock)
# yapf:enable
class TestBigGANDeepGenResBlock:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
type='BigGANDeepGenResBlock',
in_channels=32,
out_channels=16,
dim_after_concat=100,
act_cfg=dict(type='ReLU'),
upsample_cfg=dict(type='nearest', scale_factor=2),
sn_eps=1e-6,
bn_eps=1e-5,
with_spectral_norm=True,
input_is_label=False,
auto_sync_bn=True,
channel_ratio=4)
cls.x = torch.randn(2, 32, 8, 8)
cls.y = torch.randn(2, 100)
cls.label = torch.randint(0, 100, (2, ))
def test_biggan_deep_gen_res_block(self):
# test default setting
module = build_module(self.default_cfg)
assert isinstance(module, BigGANDeepGenResBlock)
out = module(self.x, self.y)
assert out.shape == (2, 16, 16, 16)
# test without upsample
cfg = deepcopy(self.default_cfg)
cfg.update(dict(upsample_cfg=None))
module = build_module(cfg)
out = module(self.x, self.y)
assert out.shape == (2, 16, 8, 8)
# test input_is_label
cfg = deepcopy(self.default_cfg)
cfg.update(dict(input_is_label=True))
module = build_module(cfg)
out = module(self.x, self.label)
assert out.shape == (2, 16, 16, 16)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg)
out = module(self.x, self.y)
assert out.shape == (2, 16, 16, 16)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_deep_gen_res_block_cuda(self):
# test default setting
module = build_module(self.default_cfg).cuda()
assert isinstance(module, BigGANDeepGenResBlock)
out = module(self.x.cuda(), self.y.cuda())
assert out.shape == (2, 16, 16, 16)
# test without upsample
cfg = deepcopy(self.default_cfg)
cfg.update(dict(upsample_cfg=None))
module = build_module(cfg).cuda()
out = module(self.x.cuda(), self.y.cuda())
assert out.shape == (2, 16, 8, 8)
# test input_is_label
cfg = deepcopy(self.default_cfg)
cfg.update(dict(input_is_label=True))
module = build_module(cfg).cuda()
out = module(self.x.cuda(), self.label.cuda())
assert out.shape == (2, 16, 16, 16)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg).cuda()
out = module(self.x.cuda(), self.y.cuda())
assert out.shape == (2, 16, 16, 16)
class TestBigGANDeepDiscResBlock:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
type='BigGANDeepDiscResBlock',
in_channels=32,
out_channels=64,
channel_ratio=4,
act_cfg=dict(type='ReLU', inplace=False),
sn_eps=1e-6,
with_downsample=True,
with_spectral_norm=True)
cls.x = torch.randn(2, 32, 16, 16)
def test_biggan_deep_disc_res_block(self):
# test default setting
module = build_module(self.default_cfg)
assert isinstance(module, BigGANDeepDiscResBlock)
out = module(self.x)
assert out.shape == (2, 64, 8, 8)
        # test with_downsample=False
cfg = deepcopy(self.default_cfg)
cfg.update(dict(with_downsample=False))
module = build_module(cfg)
out = module(self.x)
assert out.shape == (2, 64, 16, 16)
# test different channel_ratio
cfg = deepcopy(self.default_cfg)
cfg.update(dict(channel_ratio=8))
module = build_module(cfg)
out = module(self.x)
assert out.shape == (2, 64, 8, 8)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg)
out = module(self.x)
assert out.shape == (2, 64, 8, 8)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_deep_disc_res_block_cuda(self):
# test default setting
module = build_module(self.default_cfg).cuda()
assert isinstance(module, BigGANDeepDiscResBlock)
out = module(self.x.cuda())
assert out.shape == (2, 64, 8, 8)
        # test with_downsample=False
cfg = deepcopy(self.default_cfg)
cfg.update(dict(with_downsample=False))
module = build_module(cfg).cuda()
out = module(self.x.cuda())
assert out.shape == (2, 64, 16, 16)
# test different channel_ratio
cfg = deepcopy(self.default_cfg)
cfg.update(dict(channel_ratio=8))
        module = build_module(cfg).cuda()
        out = module(self.x.cuda())
assert out.shape == (2, 64, 8, 8)
# test torch-sn
cfg = deepcopy(self.default_cfg)
cfg.update(dict(sn_style='torch'))
module = build_module(cfg).cuda()
out = module(self.x.cuda())
assert out.shape == (2, 64, 8, 8)
class TestBigGANDeepGenerator(object):
@classmethod
def setup_class(cls):
cls.noise = torch.randn((3, 120))
num_classes = 1000
cls.label = torch.randint(0, num_classes, (3, ))
cls.default_config = dict(
type='BigGANDeepGenerator',
output_scale=128,
num_classes=num_classes,
base_channels=4)
def test_biggan_deep_generator(self):
# test default setting with builder
g = build_module(self.default_config)
assert isinstance(g, BigGANDeepGenerator)
res = g(self.noise, self.label)
assert res.shape == (3, 3, 128, 128)
# test 'return_noise'
res = g(self.noise, self.label, return_noise=True)
assert res['fake_img'].shape == (3, 3, 128, 128)
assert res['noise_batch'].shape == (3, 120)
assert res['label'].shape == (3, )
res = g(None, None, num_batches=3, return_noise=True)
assert res['fake_img'].shape == (3, 3, 128, 128)
assert res['noise_batch'].shape == (3, 120)
assert res['label'].shape == (3, )
# test callable
noise = torch.randn
label = partial(torch.randint, 0, 1000)
res = g(noise, label, num_batches=2)
assert res.shape == (2, 3, 128, 128)
# test different output scale
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=256))
g = build_module(cfg)
noise = torch.randn((3, 120))
res = g(noise, self.label)
assert res.shape == (3, 3, 256, 256)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 256, 256)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=512))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 512, 512)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=64))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 64, 64)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=32))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 32, 32)
# test with `concat_noise=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(concat_noise=False))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(
dict(
num_classes=0, with_shared_embedding=False,
concat_noise=False))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test no shared embedding
cfg = deepcopy(self.default_config)
cfg.update(dict(with_shared_embedding=False, concat_noise=False))
g = build_module(cfg)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
g = build_module(cfg)
res = g(self.noise, self.label)
assert res.shape == (3, 3, 128, 128)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_deep_generator_cuda(self):
# test default setting with builder
g = build_module(self.default_config).cuda()
assert isinstance(g, BigGANDeepGenerator)
res = g(self.noise.cuda(), self.label.cuda())
assert res.shape == (3, 3, 128, 128)
# test 'return_noise'
res = g(self.noise.cuda(), self.label.cuda(), return_noise=True)
assert res['fake_img'].shape == (3, 3, 128, 128)
assert res['noise_batch'].shape == (3, 120)
assert res['label'].shape == (3, )
res = g(None, None, num_batches=3, return_noise=True)
assert res['fake_img'].shape == (3, 3, 128, 128)
assert res['noise_batch'].shape == (3, 120)
assert res['label'].shape == (3, )
# test callable
noise = torch.randn
label = partial(torch.randint, 0, 1000)
res = g(noise, label, num_batches=2)
assert res.shape == (2, 3, 128, 128)
# test different output scale
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=256))
g = build_module(cfg).cuda()
noise = torch.randn((3, 120))
res = g(noise.cuda(), self.label.cuda())
assert res.shape == (3, 3, 256, 256)
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 256, 256)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=512))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 512, 512)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=64))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 64, 64)
cfg = deepcopy(self.default_config)
cfg.update(dict(output_scale=32))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 32, 32)
# test with `concat_noise=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(concat_noise=False))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(
dict(
num_classes=0, with_shared_embedding=False,
concat_noise=False))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test no shared embedding
cfg = deepcopy(self.default_config)
cfg.update(dict(with_shared_embedding=False, concat_noise=False))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
g = build_module(cfg).cuda()
res = g(None, None, num_batches=3)
assert res.shape == (3, 3, 128, 128)
class TestBigGANDeepDiscriminator(object):
@classmethod
def setup_class(cls):
num_classes = 1000
cls.default_config = dict(
type='BigGANDeepDiscriminator',
input_scale=128,
num_classes=num_classes,
base_channels=8)
cls.x = torch.randn((2, 3, 128, 128))
cls.label = torch.randint(0, num_classes, (2, ))
def test_biggan_deep_discriminator(self):
# test default settings
d = build_module(self.default_config)
assert isinstance(d, BigGANDeepDiscriminator)
y = d(self.x, self.label)
assert y.shape == (2, 1)
# test different init types
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='N02'))
d = build_module(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='xavier'))
d = build_module(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(dict(num_classes=0))
d = build_module(cfg)
y = d(self.x, None)
assert y.shape == (2, 1)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
d = build_module(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
d = build_module(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_deep_discriminator_cuda(self):
# test default settings
d = build_module(self.default_config).cuda()
assert isinstance(d, BigGANDeepDiscriminator)
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# test different init types
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='N02'))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='xavier'))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(dict(num_classes=0))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), None)
assert y.shape == (2, 1)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
d = build_module(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmgen.models.architectures.cyclegan import ResnetGenerator
class TestResnetGenerator:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
in_channels=3,
out_channels=3,
base_channels=64,
norm_cfg=dict(type='IN'),
use_dropout=False,
num_blocks=9,
padding_mode='reflect',
init_cfg=dict(type='normal', gain=0.02))
def test_cyclegan_generator_cpu(self):
# test with default cfg
real_a = torch.randn((2, 3, 256, 256))
gen = ResnetGenerator(**self.default_cfg)
fake_b = gen(real_a)
assert fake_b.shape == (2, 3, 256, 256)
        # test overriding default args (num_blocks)
cfg = deepcopy(self.default_cfg)
cfg['num_blocks'] = 8
gen = ResnetGenerator(**cfg)
fake_b = gen(real_a)
assert fake_b.shape == (2, 3, 256, 256)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_cyclegan_generator_cuda(self):
# test with default cfg
real_a = torch.randn((2, 3, 256, 256)).cuda()
gen = ResnetGenerator(**self.default_cfg).cuda()
fake_b = gen(real_a)
assert fake_b.shape == (2, 3, 256, 256)
        # test overriding default args (num_blocks)
cfg = deepcopy(self.default_cfg)
cfg['num_blocks'] = 8
gen = ResnetGenerator(**cfg).cuda()
fake_b = gen(real_a)
assert fake_b.shape == (2, 3, 256, 256)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmgen.models import DCGANDiscriminator, DCGANGenerator, build_module
class TestDCGANGenerator(object):
@classmethod
def setup_class(cls):
cls.noise = torch.randn((2, 100))
cls.default_config = dict(
type='DCGANGenerator', output_scale=16, base_channels=32)
def test_dcgan_generator(self):
# test default setting with builder
g = build_module(self.default_config)
assert isinstance(g, DCGANGenerator)
assert g.num_upsamples == 2
assert not g.output_layer.with_norm
assert len(g.upsampling) == 1
assert g.upsampling[0].with_norm
assert g.noise2feat.with_norm
assert isinstance(g.output_layer.activate, torch.nn.Tanh)
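        # output_scale=16 needs two 2x upsamplings from the 4x4 seed; the
        # last one doubles as the (norm-free) output layer, so only one
        # module remains in `upsampling`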
# check forward function
img = g(self.noise)
assert img.shape == (2, 3, 16, 16)
img = g(self.noise[:, :, None, None])
assert img.shape == (2, 3, 16, 16)
img = g(torch.randn, num_batches=2)
assert img.shape == (2, 3, 16, 16)
img = g(None, num_batches=2)
assert img.shape == (2, 3, 16, 16)
with pytest.raises(ValueError):
_ = g(torch.randn((1, 100, 3)))
with pytest.raises(AssertionError):
_ = g(torch.randn)
with pytest.raises(AssertionError):
_ = g(None)
with pytest.raises(AssertionError):
_ = g(torch.randn(2, 10))
results = g(self.noise, return_noise=True)
assert results['noise_batch'].shape == (2, 100, 1, 1)
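        # noise is reshaped to (n, noise_size, 1, 1) internally to feed
        # the transposed-conv stack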
# sanity check for args with cpu model
g = DCGANGenerator(32, base_channels=64)
img = g(self.noise)
assert img.shape == (2, 3, 32, 32)
assert g.base_channels == 64
g = DCGANGenerator(16, out_channels=1, base_channels=32)
img = g(self.noise)
assert img.shape == (2, 1, 16, 16)
g = DCGANGenerator(16, noise_size=10, base_channels=32)
with pytest.raises(AssertionError):
_ = g(self.noise)
img = g(torch.randn(2, 10))
assert img.shape == (2, 3, 16, 16)
g = DCGANGenerator(
16, default_act_cfg=dict(type='LeakyReLU'), base_channels=32)
assert isinstance(g.noise2feat.activate, torch.nn.LeakyReLU)
assert isinstance(g.upsampling[0].activate, torch.nn.LeakyReLU)
assert isinstance(g.output_layer.activate, torch.nn.Tanh)
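        # default_act_cfg only swaps the hidden-layer activations; the
        # output layer keeps its Tanh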
with pytest.raises(TypeError):
_ = DCGANGenerator(
16, noise_size=10, base_channels=32, pretrained=dict())
# check for cuda
if not torch.cuda.is_available():
return
g = build_module(self.default_config).cuda()
assert isinstance(g, DCGANGenerator)
assert g.num_upsamples == 2
assert not g.output_layer.with_norm
assert len(g.upsampling) == 1
assert g.upsampling[0].with_norm
# check forward function
img = g(self.noise)
assert img.shape == (2, 3, 16, 16)
img = g(self.noise[:, :, None, None])
assert img.shape == (2, 3, 16, 16)
img = g(torch.randn, num_batches=2)
assert img.shape == (2, 3, 16, 16)
img = g(None, num_batches=2)
assert img.shape == (2, 3, 16, 16)
with pytest.raises(ValueError):
_ = g(torch.randn((1, 100, 3)))
with pytest.raises(AssertionError):
_ = g(torch.randn)
with pytest.raises(AssertionError):
_ = g(None)
results = g(self.noise, return_noise=True)
assert results['noise_batch'].shape == (2, 100, 1, 1)
class TestDCGANDiscriminator(object):
@classmethod
def setup_class(cls):
cls.input_tensor = torch.randn((2, 3, 32, 32))
cls.default_config = dict(
type='DCGANDiscriminator',
input_scale=32,
output_scale=4,
out_channels=5)
def test_dcgan_discriminator(self):
# test default setting with builder
d = build_module(self.default_config)
pred = d(self.input_tensor)
assert pred.shape == (2, 5)
assert d.num_downsamples == 3
assert len(d.downsamples) == 3
assert not d.downsamples[0].with_norm
assert not d.output_layer.with_norm
assert not d.output_layer.with_activation
assert isinstance(d.downsamples[1].activate, torch.nn.LeakyReLU)
assert isinstance(d.downsamples[1].norm, torch.nn.BatchNorm2d)
# sanity check for args with cpu model
d = DCGANDiscriminator(input_scale=64, output_scale=8, out_channels=2)
assert d.input_scale == 64 and d.output_scale == 8
assert d.num_downsamples == 3
assert d.out_channels == 2
pred = d(torch.randn((1, 3, 64, 64)))
        assert pred.shape == (1, 2)
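        # the output conv's kernel equals output_scale, collapsing the
        # feature map to 1x1, so predictions flatten to (n, out_channels)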
with pytest.raises(TypeError):
_ = DCGANDiscriminator(32, 4, 2, pretrained=dict())
# check for cuda
if not torch.cuda.is_available():
return
# test default setting with builder on GPU
d = build_module(self.default_config).cuda()
pred = d(self.input_tensor.cuda())
assert pred.shape == (2, 5)
assert d.num_downsamples == 3
assert len(d.downsamples) == 3
assert not d.downsamples[0].with_norm
assert not d.output_layer.with_norm
assert not d.output_layer.with_activation
assert isinstance(d.downsamples[1].activate, torch.nn.LeakyReLU)
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmgen.models import DenoisingUnet, build_module
class TestDDPM:
@classmethod
def setup_class(cls):
cls.denoising_cfg = dict(
type='DenoisingUnet',
image_size=32,
in_channels=3,
base_channels=128,
resblocks_per_downsample=3,
attention_res=[16, 8],
use_scale_shift_norm=True,
dropout=0,
num_heads=4,
use_rescale_timesteps=True,
output_cfg=dict(mean='eps', var='learned_range'),
num_timesteps=2000)
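        # output_cfg: mean='eps' makes the UNet predict the added noise;
        # var='learned_range' predicts a factor interpolating between the
        # fixed log-variance bounds (improved-DDPM style)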
cls.x_t = torch.randn(2, 3, 32, 32)
cls.timesteps = torch.LongTensor([999, 1999])
cls.label = torch.randint(0, 10, (2, ))
def test_denoising_cpu(self):
# test default config
denoising = build_module(self.denoising_cfg)
assert isinstance(denoising, DenoisingUnet)
output_dict = denoising(self.x_t, self.timesteps, return_noise=True)
assert 'eps_t_pred' in output_dict
assert 'factor' in output_dict
assert 'x_t' in output_dict
assert 't_rescaled' in output_dict
assert (output_dict['x_t'] == self.x_t).all()
assert (output_dict['t_rescaled'] < 1000).all()
assert (output_dict['factor'] < 1).all()
assert (output_dict['factor'] > 0).all()
# test image size --> list input
config = deepcopy(self.denoising_cfg)
        config['image_size'] = [32, 32]
        denoising = build_module(config)
        output_dict = denoising(self.x_t, self.timesteps)
assert 'eps_t_pred' in output_dict
assert 'factor' in output_dict
assert output_dict['eps_t_pred'].shape == (2, 3, 32, 32)
# test image size --> raise type error
config = deepcopy(self.denoising_cfg)
config['image_size'] = '32'
with pytest.raises(TypeError):
build_module(config)
# test image size --> wrong list length
config = deepcopy(self.denoising_cfg)
config['image_size'] = [32, 32, 32]
with pytest.raises(AssertionError):
build_module(config)
# test image size --> wrong list element
config = deepcopy(self.denoising_cfg)
config['image_size'] = [32, 64]
with pytest.raises(AssertionError):
build_module(config)
# test channels_cfg --> list
config = deepcopy(self.denoising_cfg)
config['channels_cfg'] = [1, 2, 2, 2]
denoising = build_module(config)
assert isinstance(denoising, DenoisingUnet)
output_dict = denoising(self.x_t, self.timesteps)
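        # the list form gives per-level multipliers of base_channels, one
        # entry per resolution level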
# test channels_cfg --> dict
config = deepcopy(self.denoising_cfg)
config['channels_cfg'] = {32: [1, 2, 2, 2, 2]}
denoising = build_module(config)
output_dict = denoising(self.x_t, self.timesteps)
assert 'eps_t_pred' in output_dict
assert 'factor' in output_dict
assert (output_dict['factor'] < 1).all()
assert (output_dict['factor'] > 0).all()
        # test channels_cfg --> image_size missing from default dict
config = deepcopy(self.denoising_cfg)
config['image_size'] = 194
with pytest.raises(KeyError):
denoising = build_module(config)
# test channels_cfg --> wrong type error
config = deepcopy(self.denoising_cfg)
config['channels_cfg'] = '1, 2, 2, 2'
with pytest.raises(ValueError):
denoising = build_module(config)
# test use rescale timesteps
config = deepcopy(self.denoising_cfg)
config['use_rescale_timesteps'] = False
denoising = build_module(config)
output_dict = denoising(self.x_t, self.timesteps, return_noise=True)
assert (output_dict['t_rescaled'] == self.timesteps).all()
# test var_mode --> LEARNED
config = deepcopy(self.denoising_cfg)
config['output_cfg']['var'] = 'LEARNED'
denoising = build_module(config)
output_dict = denoising(self.x_t, self.timesteps, return_noise=True)
assert 'logvar' in output_dict
        # test var_mode --> FIXED_SMALL
config = deepcopy(self.denoising_cfg)
config['output_cfg']['var'] = 'FIXED_SMALL'
denoising = build_module(config)
output_dict = denoising(self.x_t, self.timesteps, return_noise=True)
assert 'factor' not in output_dict and 'logvar' not in output_dict
# test var_mode --> raise error
config = deepcopy(self.denoising_cfg)
config['output_cfg']['var'] = 'ERROR'
denoising = build_module(config)
with pytest.raises(AttributeError):
output_dict = denoising(
self.x_t, self.timesteps, return_noise=True)
# test mean_mode --> START_X
config = deepcopy(self.denoising_cfg)
config['output_cfg']['mean'] = 'START_X'
denoising = build_module(config)
output_dict = denoising(self.x_t, self.timesteps, return_noise=True)
assert 'x_0_pred' in output_dict
        # test mean_mode --> PREVIOUS_X
config = deepcopy(self.denoising_cfg)
config['output_cfg']['mean'] = 'PREVIOUS_X'
denoising = build_module(config)
output_dict = denoising(self.x_t, self.timesteps, return_noise=True)
assert 'x_tm1_pred' in output_dict
        # test mean_mode --> raise error
config = deepcopy(self.denoising_cfg)
config['output_cfg']['mean'] = 'ERROR'
denoising = build_module(config)
with pytest.raises(AttributeError):
output_dict = denoising(
self.x_t, self.timesteps, return_noise=True)
# test timestep embedding --> raise error
config = deepcopy(self.denoising_cfg)
config['time_embedding_mode'] = 'cos'
with pytest.raises(ValueError):
denoising = build_module(config)
# test timestep embedding --> new config
config = deepcopy(self.denoising_cfg)
config['time_embedding_cfg'] = dict(max_period=1000)
denoising = build_module(config)
# test class-conditional denoising
config = deepcopy(self.denoising_cfg)
config['num_classes'] = 10
denoising = build_module(config)
output_dict = denoising(
self.x_t, self.timesteps, self.label, return_noise=True)
assert 'label' in output_dict
assert (output_dict['label'] == self.label).all()
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmgen.models.architectures import InceptionV3
class TestFIDInception:
@classmethod
def setup_class(cls):
cls.load_fid_inception = False
def test_fid_inception(self):
inception = InceptionV3(load_fid_inception=self.load_fid_inception)
imgs = torch.randn((2, 3, 256, 256))
out = inception(imgs)[0]
assert out.shape == (2, 2048, 1, 1)
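        # inputs are resized internally to the 299x299 Inception input,
        # so different image sizes all yield the same 2048-d pooled feature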
imgs = torch.randn((2, 3, 512, 512))
out = inception(imgs)[0]
assert out.shape == (2, 2048, 1, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_fid_inception_cuda(self):
inception = InceptionV3(
load_fid_inception=self.load_fid_inception).cuda()
imgs = torch.randn((2, 3, 256, 256)).cuda()
out = inception(imgs)[0]
assert out.shape == (2, 2048, 1, 1)
imgs = torch.randn((2, 3, 512, 512)).cuda()
out = inception(imgs)[0]
assert out.shape == (2, 2048, 1, 1)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmgen.models.architectures import PerceptualLoss
class TestLpips:
@classmethod
def setup_class(cls):
cls.pretrained = False
def test_lpips(self):
percept = PerceptualLoss(use_gpu=False, pretrained=self.pretrained)
img_a = torch.randn((2, 3, 256, 256))
img_b = torch.randn((2, 3, 256, 256))
perceptual_loss = percept(img_a, img_b)
assert perceptual_loss.shape == (2, 1, 1, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_lpips_cuda(self):
percept = PerceptualLoss(use_gpu=True, pretrained=self.pretrained)
img_a = torch.randn((2, 3, 256, 256)).cuda()
img_b = torch.randn((2, 3, 256, 256)).cuda()
perceptual_loss = percept(img_a, img_b)
assert perceptual_loss.shape == (2, 1, 1, 1)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmgen.models import LSGANDiscriminator, LSGANGenerator, build_module
class TestLSGANGenerator(object):
@classmethod
def setup_class(cls):
cls.noise = torch.randn((3, 128))
cls.default_config = dict(
type='LSGANGenerator', noise_size=128, output_scale=128)
def test_lsgan_generator(self):
# test default setting with builder
g = build_module(self.default_config)
assert isinstance(g, LSGANGenerator)
x = g(None, num_batches=3)
assert x.shape == (3, 3, 128, 128)
x = g(None, num_batches=3, return_noise=True)
assert x['noise_batch'].shape == (3, 128)
x = g(self.noise, return_noise=True)
assert x['noise_batch'].shape == (3, 128)
x = g(torch.randn, num_batches=3, return_noise=True)
assert x['noise_batch'].shape == (3, 128)
# test different output_scale
config = dict(type='LSGANGenerator', noise_size=128, output_scale=64)
g = build_module(config)
assert isinstance(g, LSGANGenerator)
x = g(None, num_batches=3)
assert x.shape == (3, 3, 64, 64)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_lsgan_generator_cuda(self):
# test default setting with builder
g = build_module(self.default_config).cuda()
assert isinstance(g, LSGANGenerator)
x = g(None, num_batches=3)
assert x.shape == (3, 3, 128, 128)
x = g(None, num_batches=3, return_noise=True)
assert x['noise_batch'].shape == (3, 128)
x = g(self.noise.cuda(), return_noise=True)
assert x['noise_batch'].shape == (3, 128)
x = g(torch.randn, num_batches=3, return_noise=True)
assert x['noise_batch'].shape == (3, 128)
# test different output_scale
config = dict(type='LSGANGenerator', noise_size=128, output_scale=64)
g = build_module(config).cuda()
assert isinstance(g, LSGANGenerator)
x = g(None, num_batches=3)
assert x.shape == (3, 3, 64, 64)
class TestLSGANDiscriminator(object):
@classmethod
def setup_class(cls):
cls.x = torch.randn((2, 3, 128, 128))
cls.default_config = dict(
type='LSGANDiscriminator', in_channels=3, input_scale=128)
def test_lsgan_discriminator(self):
# test default setting with builder
d = build_module(self.default_config)
assert isinstance(d, LSGANDiscriminator)
score = d(self.x)
assert score.shape == (2, 1)
# test different input_scale
config = dict(type='LSGANDiscriminator', in_channels=3, input_scale=64)
d = build_module(config)
assert isinstance(d, LSGANDiscriminator)
x = torch.randn((2, 3, 64, 64))
score = d(x)
assert score.shape == (2, 1)
# test different config
config = dict(
type='LSGANDiscriminator',
in_channels=3,
input_scale=64,
out_act_cfg=dict(type='Sigmoid'))
d = build_module(config)
assert isinstance(d, LSGANDiscriminator)
x = torch.randn((2, 3, 64, 64))
score = d(x)
assert score.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_lsgan_discriminator_cuda(self):
# test default setting with builder
d = build_module(self.default_config).cuda()
assert isinstance(d, LSGANDiscriminator)
score = d(self.x.cuda())
assert score.shape == (2, 1)
# test different input_scale
config = dict(type='LSGANDiscriminator', in_channels=3, input_scale=64)
d = build_module(config).cuda()
assert isinstance(d, LSGANDiscriminator)
x = torch.randn((2, 3, 64, 64))
score = d(x.cuda())
assert score.shape == (2, 1)
# test different config
config = dict(
type='LSGANDiscriminator',
in_channels=3,
input_scale=64,
out_act_cfg=dict(type='Sigmoid'))
d = build_module(config).cuda()
assert isinstance(d, LSGANDiscriminator)
x = torch.randn((2, 3, 64, 64))
score = d(x.cuda())
assert score.shape == (2, 1)
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import torch
from mmgen.models.architectures.stylegan import MSStyleGANv2Generator
class TestMSStyleGAN2:
@classmethod
def setup_class(cls):
cls.generator_cfg = dict(out_size=32, style_channels=16)
cls.disc_cfg = dict(in_size=32, with_adaptive_pool=True)
def test_msstylegan2_cpu(self):
# test normal forward
cfg_ = deepcopy(self.generator_cfg)
g = MSStyleGANv2Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 32, 32)
        # set mix_prob to 1 and 0 to force both style-mixing branches
cfg_ = deepcopy(self.generator_cfg)
cfg_['mix_prob'] = 1
g = MSStyleGANv2Generator(**cfg_)
res = g(torch.randn, num_batches=2)
assert res.shape == (2, 3, 32, 32)
cfg_ = deepcopy(self.generator_cfg)
cfg_['mix_prob'] = 0
g = MSStyleGANv2Generator(**cfg_)
res = g(torch.randn, num_batches=2)
assert res.shape == (2, 3, 32, 32)
cfg_ = deepcopy(self.generator_cfg)
cfg_['mix_prob'] = 1
g = MSStyleGANv2Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 32, 32)
cfg_ = deepcopy(self.generator_cfg)
cfg_['mix_prob'] = 0
g = MSStyleGANv2Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 32, 32)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmgen.models.architectures.positional_encoding import CatersianGrid as CSG
from mmgen.models.architectures.positional_encoding import \
SinusoidalPositionalEmbedding as SPE
class TestSPE:
@classmethod
def setup_class(cls):
cls.spe = SPE(4, 0, 32)
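        # SPE args: embedding_dim=4, padding_idx=0, init_size=32
        # (fairseq-style sinusoidal embedding table)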
def test_spe_cpu(self):
# test spe 1d
embed = self.spe(torch.randn((2, 10)))
assert embed.shape == (2, 10, 4)
# test spe 2d
embed = self.spe(torch.randn((2, 3, 8, 8)))
assert embed.shape == (2, 8, 8, 8)
with pytest.raises(AssertionError):
_ = self.spe(torch.randn(2, 3, 3))
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_spe_gpu(self):
spe = self.spe.cuda()
# test spe 1d
embed = spe(torch.randn((2, 10)).cuda())
assert embed.shape == (2, 10, 4)
assert embed.is_cuda
# test spe 2d
embed = spe(torch.randn((2, 3, 8, 8)).cuda())
assert embed.shape == (2, 8, 8, 8)
with pytest.raises(AssertionError):
_ = spe(torch.randn(2, 3, 3))
class TestCSG:
@classmethod
def setup_class(cls):
cls.csg = CSG()
def test_csg_cpu(self):
csg = self.csg(torch.randn((2, 3, 4, 4)))
assert csg.shape == (2, 2, 4, 4)
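        # the Cartesian grid yields two coordinate channels (x and y)
        # broadcast to the input's spatial size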
with pytest.raises(AssertionError):
_ = self.csg(torch.randn((2, 3, 3)))
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_csg_cuda(self):
embed = self.csg(torch.randn((2, 4, 5, 5)).cuda())
assert embed.shape == (2, 2, 5, 5) and embed.is_cuda
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
import torch.nn as nn
from mmgen.models import (EqualizedLR, EqualizedLRConvDownModule,
EqualizedLRConvModule, EqualizedLRConvUpModule,
EqualizedLRLinearModule, MiniBatchStddevLayer,
PGGANNoiseTo2DFeat, PixelNorm, equalized_lr)
from mmgen.models.architectures.pggan import PGGANDiscriminator, PGGANGenerator
class TestEqualizedLR:
@classmethod
def setup_class(cls):
cls.default_conv_cfg = dict(
in_channels=1,
out_channels=1,
kernel_size=3,
stride=1,
padding=1,
norm_cfg=dict(type='BN'))
cls.conv_input = torch.randn((2, 1, 5, 5))
cls.linear_input = torch.randn((2, 2))
def test_equalized_conv_module(self):
conv = EqualizedLRConvModule(**self.default_conv_cfg)
res = conv(self.conv_input)
assert res.shape == (2, 1, 5, 5)
has_equalized_lr = False
for _, v in conv.conv._forward_pre_hooks.items():
if isinstance(v, EqualizedLR):
has_equalized_lr = True
assert has_equalized_lr
conv = EqualizedLRConvModule(
equalized_lr_cfg=None, **self.default_conv_cfg)
res = conv(self.conv_input)
assert res.shape == (2, 1, 5, 5)
has_equalized_lr = False
for _, v in conv.conv._forward_pre_hooks.items():
if isinstance(v, EqualizedLR):
has_equalized_lr = True
assert not has_equalized_lr
conv = EqualizedLRConvModule(
equalized_lr_cfg=dict(gain=1), **self.default_conv_cfg)
res = conv(self.conv_input)
assert res.shape == (2, 1, 5, 5)
has_equalized_lr = False
for _, v in conv.conv._forward_pre_hooks.items():
if isinstance(v, EqualizedLR):
assert v.gain == 1
has_equalized_lr = True
assert has_equalized_lr
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_equalized_conv_module_cuda(self):
conv = EqualizedLRConvModule(**self.default_conv_cfg).cuda()
res = conv(self.conv_input.cuda())
assert res.shape == (2, 1, 5, 5)
has_equalized_lr = False
for _, v in conv.conv._forward_pre_hooks.items():
if isinstance(v, EqualizedLR):
has_equalized_lr = True
assert has_equalized_lr
def test_equalized_linear_module(self):
linear = EqualizedLRLinearModule(2, 2)
res = linear(self.linear_input)
assert res.shape == (2, 2)
has_equalized_lr = False
for _, v in linear._forward_pre_hooks.items():
if isinstance(v, EqualizedLR):
has_equalized_lr = True
assert has_equalized_lr
linear = EqualizedLRLinearModule(2, 2, equalized_lr_cfg=None)
res = linear(self.linear_input)
assert res.shape == (2, 2)
has_equalized_lr = False
for _, v in linear._forward_pre_hooks.items():
if isinstance(v, EqualizedLR):
has_equalized_lr = True
assert not has_equalized_lr
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_equalized_linear_module_cuda(self):
linear = EqualizedLRLinearModule(2, 2).cuda()
res = linear(self.linear_input.cuda())
assert res.shape == (2, 2)
has_equalized_lr = False
for _, v in linear._forward_pre_hooks.items():
if isinstance(v, EqualizedLR):
has_equalized_lr = True
assert has_equalized_lr
def test_equalized_lr(self):
with pytest.raises(RuntimeError):
conv = nn.Conv2d(1, 1, 3, 1, 1)
conv = equalized_lr(conv)
conv = equalized_lr(conv)
class TestEqualizedLRConvUpModule:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
in_channels=3,
out_channels=1,
kernel_size=3,
padding=1,
stride=2,
conv_cfg=dict(type='deconv'),
upsample=dict(type='fused_nn'),
norm_cfg=dict(type='PixelNorm'))
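        # upsample='fused_nn' selects the fused upscale+conv branch and
        # PixelNorm normalizes features across the channel dimension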
cls.default_input = torch.randn((2, 3, 5, 5))
    def test_equalized_lr_convup_module(self):
convup = EqualizedLRConvUpModule(**self.default_cfg)
res = convup(self.default_input)
assert res.shape == (2, 1, 10, 10)
# test bp
res = convup(torch.randn((2, 3, 5, 5), requires_grad=True))
assert res.shape == (2, 1, 10, 10)
res.mean().backward()
# test nearest
cfg_ = deepcopy(self.default_cfg)
cfg_['upsample'] = dict(type='nearest', scale_factor=2)
cfg_['kernel_size'] = 4
convup = EqualizedLRConvUpModule(**cfg_)
res = convup(self.default_input)
assert res.shape == (2, 1, 20, 20)
        # test without upsample
cfg_ = deepcopy(self.default_cfg)
cfg_['upsample'] = None
cfg_['kernel_size'] = 4
convup = EqualizedLRConvUpModule(**cfg_)
res = convup(self.default_input)
assert res.shape == (2, 1, 10, 10)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_equalized_lr_convup_module_cuda(self):
convup = EqualizedLRConvUpModule(**self.default_cfg).cuda()
res = convup(self.default_input.cuda())
assert res.shape == (2, 1, 10, 10)
# test bp
res = convup(torch.randn((2, 3, 5, 5), requires_grad=True).cuda())
assert res.shape == (2, 1, 10, 10)
res.mean().backward()
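# NOTE: `EqualizedLRConvDownModule` is the mirror image: the stride-2 conv
# halves the spatial size (8 -> 4), and stacking an extra 2x2 avgpool halves
# it once more (8 -> 2), matching the shape asserts below.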
class TestEqualizedLRConvDownModule:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
in_channels=3,
out_channels=1,
kernel_size=3,
padding=1,
stride=2,
downsample=dict(type='fused_pool'))
cls.default_input = torch.randn((2, 3, 8, 8))
def test_equalized_lr_conv_down(self):
convdown = EqualizedLRConvDownModule(**self.default_cfg)
res = convdown(self.default_input)
assert res.shape == (2, 1, 4, 4)
# test bp
res = convdown(torch.randn((2, 3, 8, 8), requires_grad=True))
assert res.shape == (2, 1, 4, 4)
res.mean().backward()
# test avg pool
cfg_ = deepcopy(self.default_cfg)
cfg_['downsample'] = dict(type='avgpool', kernel_size=2, stride=2)
convdown = EqualizedLRConvDownModule(**cfg_)
res = convdown(self.default_input)
assert res.shape == (2, 1, 2, 2)
# test downsample is None
cfg_ = deepcopy(self.default_cfg)
cfg_['downsample'] = None
convdown = EqualizedLRConvDownModule(**cfg_)
res = convdown(self.default_input)
assert res.shape == (2, 1, 4, 4)
with pytest.raises(NotImplementedError):
cfg_ = deepcopy(self.default_cfg)
cfg_['downsample'] = dict(type='xxx', kernel_size=2, stride=2)
_ = EqualizedLRConvDownModule(**cfg_)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_equalized_lr_conv_down_cuda(self):
convdown = EqualizedLRConvDownModule(**self.default_cfg).cuda()
res = convdown(self.default_input.cuda())
assert res.shape == (2, 1, 4, 4)
# test bp
res = convdown(torch.randn((2, 3, 8, 8), requires_grad=True).cuda())
assert res.shape == (2, 1, 4, 4)
res.mean().backward()
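# NOTE: PixelNorm normalizes each pixel across the channel dimension,
# roughly y = x / sqrt(mean(x ** 2, dim=1, keepdim=True) + eps), so an
# all-zero input stays all-zero -- the "zero case" checked below.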
class TestPixelNorm:
@classmethod
def setup_class(cls):
cls.input_tensor = torch.randn((2, 3, 4, 4))
def test_pixel_norm(self):
pn = PixelNorm()
res = pn(self.input_tensor)
assert res.shape == (2, 3, 4, 4)
# test zero case
res = pn(self.input_tensor * 0)
assert res.shape == (2, 3, 4, 4)
assert (res == 0).all()
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_pixel_norm_cuda(self):
pn = PixelNorm().cuda()
res = pn(self.input_tensor.cuda())
assert res.shape == (2, 3, 4, 4)
# test zero case
res = pn(self.input_tensor.cuda() * 0)
assert res.shape == (2, 3, 4, 4)
assert (res == 0).all()
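# NOTE: the minibatch-stddev layer (from PGGAN) computes feature statistics
# over groups of samples and appends them as one extra feature map, so the
# channel count goes C -> C + 1 (3 -> 4 and 6 -> 7 below). The batch size is
# expected to split evenly into groups, hence the AssertionError for a batch
# of 5 under the default group size.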
class TestMiniBatchStddevLayer:
@classmethod
def setup_class(cls):
cls.default_input = torch.randn((2, 3, 4, 4))
def test_minibatch_stddev_layer(self):
ministd_layer = MiniBatchStddevLayer()
res = ministd_layer(self.default_input)
assert res.shape == (2, 4, 4, 4)
with pytest.raises(AssertionError):
_ = ministd_layer(torch.randn((5, 4, 3, 3)))
ministd_layer = MiniBatchStddevLayer(group_size=3)
res = ministd_layer(torch.randn((2, 6, 4, 4)))
assert res.shape == (2, 7, 4, 4)
# test bp
ministd_layer = MiniBatchStddevLayer()
res = ministd_layer(self.default_input.requires_grad_())
assert res.shape == (2, 4, 4, 4)
res.mean().backward()
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_minibatch_stddev_layer_cuda(self):
ministd_layer = MiniBatchStddevLayer().cuda()
res = ministd_layer(self.default_input.cuda())
assert res.shape == (2, 4, 4, 4)
ministd_layer = MiniBatchStddevLayer(group_size=3).cuda()
res = ministd_layer(torch.randn((2, 6, 4, 4)).cuda())
assert res.shape == (2, 7, 4, 4)
# test bp
ministd_layer = MiniBatchStddevLayer().cuda()
res = ministd_layer(self.default_input.requires_grad_().cuda())
assert res.shape == (2, 4, 4, 4)
res.mean().backward()
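# NOTE: `PGGANNoiseTo2DFeat` maps a latent vector of shape (N, noise_size)
# to a 4x4 feature map (N, out_channels, 4, 4) via an equalized-lr linear
# layer, optionally followed by PixelNorm and a LeakyReLU -- the attributes
# the asserts below poke at.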
class TestPGGANNoiseTo2DFeat:
@classmethod
def setup_class(cls):
cls.default_input = torch.randn((2, 10))
cls.default_cfg = dict(noise_size=10, out_channels=1)
def test_pggan_noise2feat(self):
module = PGGANNoiseTo2DFeat(**self.default_cfg)
res = module(self.default_input)
assert res.shape == (2, 1, 4, 4)
assert isinstance(module.linear, EqualizedLRLinearModule)
assert not module.linear.bias
assert module.with_norm
assert isinstance(module.norm, PixelNorm)
assert isinstance(module.activation, nn.LeakyReLU)
module = PGGANNoiseTo2DFeat(**self.default_cfg, act_cfg=None)
res = module(self.default_input)
assert res.shape == (2, 1, 4, 4)
assert isinstance(module.linear, EqualizedLRLinearModule)
assert not module.linear.bias
assert module.with_norm
assert not module.with_activation
module = PGGANNoiseTo2DFeat(
**self.default_cfg, norm_cfg=None, normalize_latent=False)
res = module(self.default_input)
assert res.shape == (2, 1, 4, 4)
assert isinstance(module.linear, EqualizedLRLinearModule)
assert not module.linear.bias
assert not module.with_norm
assert isinstance(module.activation, nn.LeakyReLU)
with pytest.raises(AssertionError):
_ = module(torch.randn((2, 1, 2)))
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_pggan_noise2feat_cuda(self):
module = PGGANNoiseTo2DFeat(**self.default_cfg).cuda()
res = module(self.default_input.cuda())
assert res.shape == (2, 1, 4, 4)
assert isinstance(module.linear, EqualizedLRLinearModule)
assert not module.linear.bias
assert module.with_norm
assert isinstance(module.activation, nn.LeakyReLU)
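# NOTE: in progressive growing, `transition_weight` in [0, 1] blends the
# newly added resolution block with the upsampled output of the previous
# stage, while `curr_scale` requests an intermediate output resolution
# (4x4 / 8x8 below) instead of the full `out_scale`.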
class TestPGGANGenerator:
@classmethod
def setup_class(cls):
cls.default_noise = torch.randn((2, 8))
cls.default_cfg = dict(
noise_size=8, out_scale=16, base_channels=32, max_channels=32)
def test_pggan_generator(self):
# test with default cfg
gen = PGGANGenerator(**self.default_cfg)
res = gen(None, num_batches=2, transition_weight=0.1)
assert res.shape == (2, 3, 16, 16)
res = gen(self.default_noise, transition_weight=0.2)
assert res.shape == (2, 3, 16, 16)
with pytest.raises(AssertionError):
_ = gen(self.default_noise[:, :, None], transition_weight=0.2)
with pytest.raises(AssertionError):
_ = gen(torch.randn((2, 1)), transition_weight=0.2)
res = gen(torch.randn, num_batches=2, transition_weight=0.2)
assert res.shape == (2, 3, 16, 16)
# test with input scale
res = gen(None, num_batches=2, curr_scale=4)
assert res.shape == (2, 3, 4, 4)
res = gen(None, num_batches=2, curr_scale=8)
assert res.shape == (2, 3, 8, 8)
# test return noise
res = gen(None, num_batches=2, curr_scale=8, return_noise=True)
assert res['fake_img'].shape == (2, 3, 8, 8)
assert res['label'] is None
assert isinstance(res['noise_batch'], torch.Tensor)
# test args system
cfg = deepcopy(self.default_cfg)
cfg['out_scale'] = 32
gen = PGGANGenerator(**cfg)
res = gen(None, num_batches=2, transition_weight=0.1)
assert res.shape == (2, 3, 32, 32)
cfg = deepcopy(self.default_cfg)
cfg['out_scale'] = 4
gen = PGGANGenerator(**cfg)
res = gen(None, num_batches=2, transition_weight=0.1)
assert res.shape == (2, 3, 4, 4)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_pggan_generator_cuda(self):
# test with default cfg
gen = PGGANGenerator(**self.default_cfg).cuda()
res = gen(None, num_batches=2, transition_weight=0.1)
assert res.shape == (2, 3, 16, 16)
# test args system
cfg = deepcopy(self.default_cfg)
cfg['out_scale'] = 32
gen = PGGANGenerator(**cfg).cuda()
res = gen(None, num_batches=2, transition_weight=0.1)
assert res.shape == (2, 3, 32, 32)
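# NOTE: the PGGAN discriminator mirrors the generator. With `label_size=2`
# it returns a (realness score, label logits) pair of shapes (N, 1) and
# (N, 2); without a label size it returns the score alone.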
class TestPGGANDiscriminator:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(in_scale=16, label_size=2)
cls.default_inputx16 = torch.randn((2, 3, 16, 16))
cls.default_inputx4 = torch.randn((2, 3, 4, 4))
cls.default_inputx8 = torch.randn((2, 3, 8, 8))
def test_pggan_discriminator(self):
# test with default cfg
disc = PGGANDiscriminator(**self.default_cfg)
score, label = disc(self.default_inputx16, transition_weight=0.1)
assert score.shape == (2, 1)
assert label.shape == (2, 2)
score, label = disc(
self.default_inputx8, transition_weight=0.1, curr_scale=8)
assert score.shape == (2, 1)
assert label.shape == (2, 2)
score, label = disc(
self.default_inputx4, transition_weight=0.1, curr_scale=4)
assert score.shape == (2, 1)
assert label.shape == (2, 2)
disc = PGGANDiscriminator(
in_scale=16,
mbstd_cfg=None,
downsample_cfg=dict(type='nearest', scale_factor=0.5))
        # without label_size, the discriminator returns the score alone
        score = disc(self.default_inputx16, transition_weight=0.1)
        assert score.shape == (2, 1)
        score = disc(self.default_inputx8, transition_weight=0.1, curr_scale=8)
        assert score.shape == (2, 1)
        score = disc(self.default_inputx4, transition_weight=0.1, curr_scale=4)
        assert score.shape == (2, 1)
        assert not disc.with_mbstd
with pytest.raises(NotImplementedError):
_ = PGGANDiscriminator(
in_scale=16, mbstd_cfg=None, downsample_cfg=dict(type='xx'))
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_pggan_discriminator_cuda(self):
# test with default cfg
disc = PGGANDiscriminator(**self.default_cfg).cuda()
score, label = disc(
self.default_inputx16.cuda(), transition_weight=0.1)
assert score.shape == (2, 1)
assert label.shape == (2, 2)
score, label = disc(
self.default_inputx8.cuda(), transition_weight=0.1, curr_scale=8)
assert score.shape == (2, 1)
assert label.shape == (2, 2)
score, label = disc(
self.default_inputx4.cuda(), transition_weight=0.1, curr_scale=4)
assert score.shape == (2, 1)
assert label.shape == (2, 2)
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmgen.models.architectures.pix2pix import (PatchDiscriminator,
UnetGenerator)
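# NOTE: the pix2pix U-Net halves the resolution `num_down` times before
# mirroring back up with skip connections, so a 256x256 input with
# num_down=8 reaches a 1x1 bottleneck (256 = 2 ** 8) and comes back out at
# the input resolution.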
class TestUnetGenerator:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
in_channels=3,
out_channels=3,
num_down=8,
base_channels=64,
norm_cfg=dict(type='BN'),
use_dropout=True,
init_cfg=dict(type='normal', gain=0.02))
def test_pix2pix_generator_cpu(self):
# test with default cfg
real_a = torch.randn((2, 3, 256, 256))
gen = UnetGenerator(**self.default_cfg)
fake_b = gen(real_a)
assert fake_b.shape == (2, 3, 256, 256)
# test args system
cfg = deepcopy(self.default_cfg)
cfg['num_down'] = 7
gen = UnetGenerator(**cfg)
fake_b = gen(real_a)
assert fake_b.shape == (2, 3, 256, 256)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_pix2pix_generator_cuda(self):
# test with default cfg
real_a = torch.randn((2, 3, 256, 256)).cuda()
gen = UnetGenerator(**self.default_cfg).cuda()
fake_b = gen(real_a)
assert fake_b.shape == (2, 3, 256, 256)
# test args system
cfg = deepcopy(self.default_cfg)
cfg['num_down'] = 7
gen = UnetGenerator(**cfg).cuda()
fake_b = gen(real_a)
assert fake_b.shape == (2, 3, 256, 256)
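# NOTE: PatchDiscriminator is a PatchGAN: it emits a map of per-patch scores
# rather than a single scalar. With num_conv=3 a 256x256 input yields a
# 30x30 score map; each extra stride-2 conv roughly halves it (num_conv=4
# gives 14x14 below).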
class TestPatchDiscriminator:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
in_channels=6,
base_channels=64,
num_conv=3,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='normal', gain=0.02))
cls.default_inputx256 = torch.randn((2, 6, 256, 256))
def test_pix2pix_discriminator_cpu(self):
# test with default cfg
disc = PatchDiscriminator(**self.default_cfg)
score = disc(self.default_inputx256)
assert score.shape == (2, 1, 30, 30)
# test args system
cfg = deepcopy(self.default_cfg)
cfg['num_conv'] = 4
disc = PatchDiscriminator(**cfg)
score = disc(self.default_inputx256)
assert score.shape == (2, 1, 14, 14)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_pix2pix_discriminator_cuda(self):
# test with default cfg
disc = PatchDiscriminator(**self.default_cfg).cuda()
score = disc(self.default_inputx256.cuda())
assert score.shape == (2, 1, 30, 30)
# test args system
cfg = deepcopy(self.default_cfg)
cfg['num_conv'] = 4
disc = PatchDiscriminator(**cfg).cuda()
score = disc(self.default_inputx256.cuda())
assert score.shape == (2, 1, 14, 14)
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmgen.models import ProjDiscriminator as SAGANDiscriminator
from mmgen.models import SNGANGenerator as SAGANGenerator
from mmgen.models import build_module
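# NOTE: SAGAN reuses the SNGAN generator / projection discriminator and
# inserts self-attention after the block(s) named by
# `attention_after_nth_block`, which must be an int or a list of ints --
# anything else should raise a ValueError, as the error paths below check.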
class TestSAGANGenerator(object):
@classmethod
def setup_class(cls):
cls.noise = torch.randn((2, 128))
cls.label = torch.randint(0, 10, (2, ))
cls.default_config = dict(
type='SAGANGenerator',
base_channels=32,
noise_size=128,
output_scale=32,
attention_cfg=dict(type='SelfAttentionBlock'),
attention_after_nth_block=2,
num_classes=10)
def test_sagan_generator(self):
# test default setting with builder
g = build_module(self.default_config)
assert isinstance(g, SAGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test return noise
x = g(None, num_batches=2, return_noise=True)
assert x['fake_img'].shape == (2, 3, 32, 32)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
x = g(self.noise, label=self.label, return_noise=True)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
x = g(torch.randn, num_batches=2, return_noise=True)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
# test different output_scale
config = deepcopy(self.default_config)
config['output_scale'] = 64
g = build_module(config)
assert isinstance(g, SAGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 64, 64)
# test different attention_after_nth_block
config = deepcopy(self.default_config)
config['attention_after_nth_block'] = [1, 2]
g = build_module(config)
assert isinstance(g, SAGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
        # attention_after_nth_block with wrong type --> raise ValueError
config = deepcopy(self.default_config)
config['attention_after_nth_block'] = '1'
with pytest.raises(ValueError):
g = build_module(config)
        # wrong element type in attention_after_nth_block list --> raise ValueError
config = deepcopy(self.default_config)
config['attention_after_nth_block'] = ['1', '2']
with pytest.raises(ValueError):
g = build_module(config)
# test init_cfg --> SAGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sagan')
g = build_module(config)
assert isinstance(g, SAGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_sagan_generator_cuda(self):
# test default setting with builder
g = build_module(self.default_config).cuda()
assert isinstance(g, SAGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test return noise
x = g(None, num_batches=2, return_noise=True)
assert x['fake_img'].shape == (2, 3, 32, 32)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
x = g(self.noise.cuda(), label=self.label.cuda(), return_noise=True)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
x = g(torch.randn, num_batches=2, return_noise=True)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
# test different output_scale
config = deepcopy(self.default_config)
config['output_scale'] = 64
g = build_module(config).cuda()
assert isinstance(g, SAGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 64, 64)
# test different attention_after_nth_block
config = deepcopy(self.default_config)
config['attention_after_nth_block'] = [1, 2]
g = build_module(config).cuda()
assert isinstance(g, SAGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test init_cfg --> SAGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sagan')
g = build_module(config).cuda()
assert isinstance(g, SAGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
class TestSAGANDiscriminator(object):
@classmethod
def setup_class(cls):
cls.x = torch.randn((2, 3, 32, 32))
cls.label = torch.randint(0, 10, (2, ))
cls.default_config = dict(
type='SAGANDiscriminator',
input_scale=32,
num_classes=10,
input_channels=3)
    def test_sagan_discriminator(self):
# test default setting with builder
d = build_module(self.default_config)
assert isinstance(d, SAGANDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different input_scale
config = deepcopy(self.default_config)
config['input_scale'] = 64
d = build_module(config)
assert isinstance(d, SAGANDiscriminator)
x = torch.randn((2, 3, 64, 64))
score = d(x, self.label)
assert score.shape == (2, 1)
# test num_classes == 0 (w/o proj_y)
config = deepcopy(self.default_config)
config['num_classes'] = 0
d = build_module(config)
assert isinstance(d, SAGANDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different base_channels
config = deepcopy(self.default_config)
config['base_channels'] = 128
d = build_module(config)
assert isinstance(d, SAGANDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different attention_after_nth_block
config = deepcopy(self.default_config)
config['attention_after_nth_block'] = [1, 2]
d = build_module(config)
assert isinstance(d, SAGANDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
        # attention_after_nth_block with wrong type --> raise ValueError
config = deepcopy(self.default_config)
config['attention_after_nth_block'] = '1'
with pytest.raises(ValueError):
d = build_module(config)
        # wrong element type in attention_after_nth_block list --> raise ValueError
config = deepcopy(self.default_config)
config['attention_after_nth_block'] = ['1', '2']
with pytest.raises(ValueError):
d = build_module(config)
# test init_cfg --> SAGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sagan')
d = build_module(config)
assert isinstance(d, SAGANDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_sagan_discriminator_cuda(self):
# test default setting with builder
d = build_module(self.default_config).cuda()
assert isinstance(d, SAGANDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different input_scale
config = deepcopy(self.default_config)
config['input_scale'] = 64
d = build_module(config).cuda()
assert isinstance(d, SAGANDiscriminator)
x = torch.randn((2, 3, 64, 64)).cuda()
score = d(x, self.label.cuda())
assert score.shape == (2, 1)
# test num_classes == 0 (w/o proj_y)
config = deepcopy(self.default_config)
config['num_classes'] = 0
d = build_module(config).cuda()
assert isinstance(d, SAGANDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different base_channels
config = deepcopy(self.default_config)
config['base_channels'] = 128
d = build_module(config).cuda()
assert isinstance(d, SAGANDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different attention_after_nth_block
config = deepcopy(self.default_config)
config['attention_after_nth_block'] = [1, 2]
d = build_module(config).cuda()
assert isinstance(d, SAGANDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test init_cfg --> SAGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sagan')
d = build_module(config).cuda()
assert isinstance(d, SAGANDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmgen.models.architectures.singan import (SinGANMSGeneratorPE,
SinGANMultiScaleDiscriminator,
SinGANMultiScaleGenerator)
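# NOTE: SinGAN generates through a pyramid of scales, each consuming a fixed
# noise map of growing resolution. A forward pass with curr_scale=2 stops at
# the third noise's resolution (12x12 below), and `get_prev_res=True` also
# returns the intermediate per-scale results.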
class TestSinGANGen:
@classmethod
def setup_class(cls):
cls.default_args = dict(
in_channels=3,
out_channels=3,
kernel_size=3,
padding=0,
num_layers=3,
base_channels=32,
num_scales=3,
min_feat_channels=16)
cls.fixed_noises = [
torch.randn(1, 3, 8, 8),
torch.randn(1, 3, 10, 10),
torch.randn(1, 3, 12, 12),
torch.randn(1, 3, 16, 16)
]
cls.input_sample = torch.zeros_like(cls.fixed_noises[0])
cls.noise_weights = [1., 0.5, 0.5, 0.5]
def test_singan_gen(self):
gen = SinGANMultiScaleGenerator(**self.default_args)
res = gen(self.input_sample, self.fixed_noises, self.noise_weights,
'rand', 2)
assert res.shape == (1, 3, 12, 12)
output = gen(
self.input_sample,
self.fixed_noises,
self.noise_weights,
'rand',
2,
get_prev_res=True)
assert output['prev_res_list'][0].shape == (1, 3, 8, 8)
class TestSinGANPEGen:
@classmethod
def setup_class(cls):
cls.default_args = dict(
in_channels=3,
out_channels=3,
kernel_size=3,
num_layers=3,
base_channels=32,
num_scales=3,
min_feat_channels=16)
cls.fixed_noises = [
torch.randn(1, 1, 8, 8),
torch.randn(1, 3, 10, 10),
torch.randn(1, 3, 12, 12),
torch.randn(1, 3, 16, 16)
]
cls.input_sample = torch.zeros((1, 3, 8, 8))
cls.noise_weights = [1., 0.5, 0.5, 0.5]
def test_singan_gen_pe(self):
gen = SinGANMSGeneratorPE(**self.default_args)
res = gen(self.input_sample, self.fixed_noises, self.noise_weights,
'rand', 2)
assert res.shape == (1, 3, 12, 12)
output = gen(
self.input_sample,
self.fixed_noises,
self.noise_weights,
'rand',
2,
get_prev_res=True)
assert output['prev_res_list'][0].shape == (1, 3, 8, 8)
gen = SinGANMSGeneratorPE(padding_mode='reflect', **self.default_args)
res = gen(self.input_sample, self.fixed_noises, self.noise_weights,
'rand', 2)
assert res.shape == (1, 3, 12, 12)
with pytest.raises(NotImplementedError):
_ = SinGANMSGeneratorPE(
padding_mode='circular', **self.default_args)
gen = SinGANMSGeneratorPE(
padding=1, pad_at_head=False, **self.default_args)
res = gen(self.input_sample, self.fixed_noises, self.noise_weights,
'rand', 2)
assert res.shape == (1, 3, 12, 12)
gen = SinGANMSGeneratorPE(
pad_at_head=True, interp_pad=True, **self.default_args)
res = gen(self.input_sample, self.fixed_noises, self.noise_weights,
'rand', 2)
assert res.shape == (1, 3, 12, 12)
gen = SinGANMSGeneratorPE(
positional_encoding=dict(
type='SPE2d', embedding_dim=4, padding_idx=0),
allow_no_residual=True,
first_stage_in_channels=8,
**self.default_args)
res = gen(self.input_sample, self.fixed_noises, self.noise_weights,
'rand', 2)
assert res.shape == (1, 3, 12, 12)
gen = SinGANMSGeneratorPE(
positional_encoding=dict(type='CSG2d'),
allow_no_residual=True,
first_stage_in_channels=2,
**self.default_args)
res = gen(self.input_sample, self.fixed_noises, self.noise_weights,
'rand', 2)
assert res.shape == (1, 3, 12, 12)
class TestSinGANDisc:
@classmethod
def setup_class(cls):
cls.default_args = dict(
in_channels=3,
kernel_size=3,
padding=0,
num_layers=3,
base_channels=32,
num_scales=3,
min_feat_channels=16)
def test_singan_disc(self):
disc = SinGANMultiScaleDiscriminator(**self.default_args)
img = torch.randn(1, 3, 24, 24)
res = disc(img, 2)
assert res.shape[0] == 1
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmgen.models import ProjDiscriminator, SNGANGenerator, build_module
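# NOTE: SNGAN-proj pairs a generator with conditional BatchNorm (so
# `use_cbn=True` together with `num_classes=0` is contradictory and should
# raise) with a projection discriminator. `channels_cfg` accepts either a
# per-block factor list or a dict keyed by the output scale; the error paths
# below exercise both misuse patterns.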
class TestSNGANPROJGenerator(object):
@classmethod
def setup_class(cls):
cls.noise = torch.randn((2, 128))
cls.label = torch.randint(0, 10, (2, ))
cls.default_config = dict(
type='SNGANGenerator',
noise_size=128,
output_scale=32,
num_classes=10,
base_channels=32)
def test_sngan_proj_generator(self):
# test default setting with builder
g = build_module(self.default_config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test return noise
x = g(None, num_batches=2, return_noise=True)
assert x['fake_img'].shape == (2, 3, 32, 32)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
x = g(self.noise, label=self.label, return_noise=True)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
x = g(torch.randn, num_batches=2, return_noise=True)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
# test different output_scale
config = deepcopy(self.default_config)
config['output_scale'] = 64
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 64, 64)
# test num_classes == 0 and `use_cbn = True`
config = deepcopy(self.default_config)
config['num_classes'] = 0
with pytest.raises(ValueError):
g = build_module(config)
# test num_classes == 0 and `use_cbn = False`
config = deepcopy(self.default_config)
config['num_classes'] = 0
config['use_cbn'] = False
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different base_channels
config = deepcopy(self.default_config)
config['base_channels'] = 64
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different channels_cfg --> list
config = deepcopy(self.default_config)
config['channels_cfg'] = [1, 1, 1]
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different channels_cfg --> dict
config = deepcopy(self.default_config)
config['channels_cfg'] = {32: [1, 1, 1], 64: [16, 8, 4, 2]}
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different channels_cfg --> error (key not find)
config = deepcopy(self.default_config)
config['channels_cfg'] = {64: [16, 8, 4, 2]}
with pytest.raises(KeyError):
g = build_module(config)
# test different channels_cfg --> error (type not match)
config = deepcopy(self.default_config)
config['channels_cfg'] = '1234'
with pytest.raises(ValueError):
g = build_module(config)
# test different act_cfg
config = deepcopy(self.default_config)
config['act_cfg'] = dict(type='Sigmoid')
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test with_spectral_norm
config = deepcopy(self.default_config)
config['with_spectral_norm'] = True
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test with_embedding_spectral_norm
config = deepcopy(self.default_config)
config['with_embedding_spectral_norm'] = True
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test norm_eps
config = deepcopy(self.default_config)
config['norm_eps'] = 1e-9
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test sn_eps
config = deepcopy(self.default_config)
config['sn_eps'] = 1e-12
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different init_cfg --> Studio
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='studio')
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different init_cfg --> BigGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='biggan')
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different init_cfg --> SNGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sngan')
g = build_module(config)
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different init_cfg --> raise error
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='wgan-gp')
with pytest.raises(NotImplementedError):
g = build_module(config)
# test pretrained --> raise error
config = deepcopy(self.default_config)
config['pretrained'] = 42
with pytest.raises(TypeError):
g = build_module(config)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_sngan_proj_generator_cuda(self):
# test default setting with builder
g = build_module(self.default_config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test return noise
x = g(None, num_batches=2, return_noise=True)
assert x['fake_img'].shape == (2, 3, 32, 32)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
x = g(self.noise.cuda(), label=self.label.cuda(), return_noise=True)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
x = g(torch.randn, num_batches=2, return_noise=True)
assert x['noise_batch'].shape == (2, 128)
assert x['label'].shape == (2, )
# test different output_scale
config = deepcopy(self.default_config)
config['output_scale'] = 64
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 64, 64)
# test different base_channels
config = deepcopy(self.default_config)
config['base_channels'] = 64
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different channels_cfg --> list
config = deepcopy(self.default_config)
config['channels_cfg'] = [1, 1, 1]
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different channels_cfg --> dict
config = deepcopy(self.default_config)
config['channels_cfg'] = {32: [1, 1, 1], 64: [16, 8, 4, 2]}
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different act_cfg
config = deepcopy(self.default_config)
config['act_cfg'] = dict(type='Sigmoid')
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test with_spectral_norm
config = deepcopy(self.default_config)
config['with_spectral_norm'] = True
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test with_embedding_spectral_norm
config = deepcopy(self.default_config)
config['with_embedding_spectral_norm'] = True
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test norm_eps
config = deepcopy(self.default_config)
config['norm_eps'] = 1e-9
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test sn_eps
config = deepcopy(self.default_config)
config['sn_eps'] = 1e-12
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
        x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different init_cfg --> BigGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='biggan')
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
# test different init_cfg --> SNGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sngan')
g = build_module(config).cuda()
assert isinstance(g, SNGANGenerator)
x = g(None, num_batches=2)
assert x.shape == (2, 3, 32, 32)
class TestSNGANPROJDiscriminator(object):
@classmethod
def setup_class(cls):
cls.x = torch.randn((2, 3, 32, 32))
cls.label = torch.randint(0, 10, (2, ))
cls.default_config = dict(
type='ProjDiscriminator',
input_scale=32,
num_classes=10,
input_channels=3)
def test_sngan_proj_discriminator(self):
# test default setting with builder
d = build_module(self.default_config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different input_scale
config = deepcopy(self.default_config)
config['input_scale'] = 64
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
x = torch.randn((2, 3, 64, 64))
score = d(x, self.label)
assert score.shape == (2, 1)
# test num_classes == 0 (w/o proj_y)
config = deepcopy(self.default_config)
config['num_classes'] = 0
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different base_channels
config = deepcopy(self.default_config)
config['base_channels'] = 128
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different channels_cfg --> list
config = deepcopy(self.default_config)
config['channels_cfg'] = [1, 1, 1]
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different channels_cfg --> dict
config = deepcopy(self.default_config)
config['channels_cfg'] = {32: [1, 1, 1], 64: [2, 4, 8, 16]}
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different channels_cfg --> error (key not find)
config = deepcopy(self.default_config)
config['channels_cfg'] = {64: [2, 4, 8, 16]}
with pytest.raises(KeyError):
d = build_module(config)
# test different channels_cfg --> error (type not match)
config = deepcopy(self.default_config)
config['channels_cfg'] = '1234'
with pytest.raises(ValueError):
d = build_module(config)
# test different downsample_cfg --> list
config = deepcopy(self.default_config)
config['downsample_cfg'] = [True, False, False]
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different downsample_cfg --> dict
config = deepcopy(self.default_config)
config['downsample_cfg'] = {
32: [True, False, False],
64: [True, True, True, True]
}
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different downsample_cfg --> error (key not find)
config = deepcopy(self.default_config)
config['downsample_cfg'] = {64: [True, True, True, True]}
with pytest.raises(KeyError):
d = build_module(config)
# test different downsample_cfg --> error (type not match)
config = deepcopy(self.default_config)
config['downsample_cfg'] = '1234'
with pytest.raises(ValueError):
d = build_module(config)
# test downsample_cfg and channels_cfg not match
config = deepcopy(self.default_config)
config['downsample_cfg'] = [True, False, False]
config['channels_cfg'] = [1, 1, 1, 1]
with pytest.raises(ValueError):
d = build_module(config)
# test different act_cfg
config = deepcopy(self.default_config)
config['act_cfg'] = dict(type='Sigmoid')
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different with_spectral_norm
config = deepcopy(self.default_config)
config['with_spectral_norm'] = False
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different init_cfg --> studio
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='studio')
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different init_cfg --> BigGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='biggan')
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different init_cfg --> sngan
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sngan-proj')
d = build_module(config)
assert isinstance(d, ProjDiscriminator)
score = d(self.x, self.label)
assert score.shape == (2, 1)
# test different init_cfg --> raise error
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='wgan-gp')
with pytest.raises(NotImplementedError):
d = build_module(config)
# test pretrained --> raise error
config = deepcopy(self.default_config)
config['pretrained'] = 42
with pytest.raises(TypeError):
d = build_module(config)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_sngan_proj_discriminator_cuda(self):
# test default setting with builder
d = build_module(self.default_config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different input_scale
config = deepcopy(self.default_config)
config['input_scale'] = 64
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
x = torch.randn((2, 3, 64, 64)).cuda()
score = d(x, self.label.cuda())
assert score.shape == (2, 1)
# test num_classes == 0 (w/o proj_y)
config = deepcopy(self.default_config)
config['num_classes'] = 0
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different base_channels
config = deepcopy(self.default_config)
config['base_channels'] = 128
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different channels_cfg --> list
config = deepcopy(self.default_config)
config['channels_cfg'] = [1, 1, 1]
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different channels_cfg --> dict
config = deepcopy(self.default_config)
config['channels_cfg'] = {32: [1, 1, 1], 64: [2, 4, 8, 16]}
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different downsample_cfg --> list
config = deepcopy(self.default_config)
config['downsample_cfg'] = [True, False, False]
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different downsample_cfg --> dict
config = deepcopy(self.default_config)
config['downsample_cfg'] = {
32: [True, False, False],
64: [True, True, True, True]
}
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different act_cfg
config = deepcopy(self.default_config)
config['act_cfg'] = dict(type='Sigmoid')
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different with_spectral_norm
config = deepcopy(self.default_config)
config['with_spectral_norm'] = False
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different init_cfg --> BigGAN
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='biggan')
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
# test different init_cfg --> sngan
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sngan-proj')
d = build_module(config).cuda()
assert isinstance(d, ProjDiscriminator)
score = d(self.x.cuda(), self.label.cuda())
assert score.shape == (2, 1)
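# NOTE: in the SNGAN residual blocks below, the shortcut branch becomes a
# learnable 1x1 conv whenever the channel count changes or the block
# resamples; otherwise it is a plain identity -- which is what the
# "learnable shortcut" comments refer to.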
class TestSNGANGenResBlock(object):
@classmethod
def setup_class(cls):
cls.input = torch.randn(2, 16, 5, 5)
cls.label = torch.randint(0, 10, (2, ))
cls.default_config = dict(
type='SNGANGenResBlock',
num_classes=10,
in_channels=16,
out_channels=16,
use_cbn=True,
use_norm_affine=False,
norm_cfg=dict(type='BN'),
upsample_cfg=dict(type='nearest', scale_factor=2),
upsample=True,
init_cfg=dict(type='BigGAN'))
def test_snganGenResBlock(self):
# test default config
block = build_module(self.default_config)
out = block(self.input, self.label)
assert out.shape == (2, 16, 10, 10)
# test no upsample config and no learnable sc
config = deepcopy(self.default_config)
config['upsample'] = False
block = build_module(config)
out = block(self.input, self.label)
assert out.shape == (2, 16, 5, 5)
# test learnable shortcut + w/o upsample
config = deepcopy(self.default_config)
config['out_channels'] = 32
config['upsample'] = False
block = build_module(config)
out = block(self.input, self.label)
assert out.shape == (2, 32, 5, 5)
# test init_cfg + w/o learnable shortcut
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sngan')
config['upsample'] = False
block = build_module(config)
out = block(self.input, self.label)
assert out.shape == (2, 16, 5, 5)
# test init_cfg == studio + w/o learnable shortcut
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='studio')
config['upsample'] = False
block = build_module(config)
out = block(self.input, self.label)
assert out.shape == (2, 16, 5, 5)
# test init_cfg == sagan + learnable shortcut
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sagan')
block = build_module(config)
out = block(self.input, self.label)
assert out.shape == (2, 16, 10, 10)
# test init_cfg == sagan + w/o learnable shortcut
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sagan')
config['upsample'] = False
block = build_module(config)
out = block(self.input, self.label)
assert out.shape == (2, 16, 5, 5)
        # test init_cfg --> raise error
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='wgan-gp')
with pytest.raises(NotImplementedError):
block = build_module(config)
# test conv_cfg
config = deepcopy(self.default_config)
config['conv_cfg'] = dict(
kernel_size=1, stride=1, padding=0, act_cfg=None)
block = build_module(config)
out = block(self.input, self.label)
assert out.shape == (2, 16, 10, 10)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_snganGenResBlock_cuda(self):
# test default config
block = build_module(self.default_config).cuda()
out = block(self.input.cuda(), self.label.cuda())
assert out.shape == (2, 16, 10, 10)
# test no upsample config and no learnable sc
config = deepcopy(self.default_config)
config['upsample'] = False
block = build_module(config).cuda()
out = block(self.input.cuda(), self.label.cuda())
assert out.shape == (2, 16, 5, 5)
        # test init_cfg == studio + w/o learnable shortcut
        config = deepcopy(self.default_config)
        config['init_cfg'] = dict(type='studio')
        config['upsample'] = False
        block = build_module(config).cuda()
        out = block(self.input.cuda(), self.label.cuda())
        assert out.shape == (2, 16, 5, 5)
        # test init_cfg == sagan + learnable shortcut
        config = deepcopy(self.default_config)
        config['init_cfg'] = dict(type='sagan')
        block = build_module(config).cuda()
        out = block(self.input.cuda(), self.label.cuda())
        assert out.shape == (2, 16, 10, 10)
        # test init_cfg == sagan + w/o learnable shortcut
        config = deepcopy(self.default_config)
        config['init_cfg'] = dict(type='sagan')
        config['upsample'] = False
        block = build_module(config).cuda()
        out = block(self.input.cuda(), self.label.cuda())
        assert out.shape == (2, 16, 5, 5)
# test learnable shortcut + w/o upsample
config = deepcopy(self.default_config)
config['out_channels'] = 32
config['upsample'] = False
block = build_module(config).cuda()
out = block(self.input.cuda(), self.label.cuda())
assert out.shape == (2, 32, 5, 5)
# test init_cfg + w/o learnable shortcut
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sngan')
config['upsample'] = False
block = build_module(config).cuda()
out = block(self.input.cuda(), self.label.cuda())
assert out.shape == (2, 16, 5, 5)
# test conv_cfg
config = deepcopy(self.default_config)
config['conv_cfg'] = dict(
kernel_size=1, stride=1, padding=0, act_cfg=None)
block = build_module(config).cuda()
out = block(self.input.cuda(), self.label.cuda())
assert out.shape == (2, 16, 10, 10)
class TestSNDiscResBlock(object):
@classmethod
def setup_class(cls):
cls.input = torch.randn(2, 16, 10, 10)
cls.default_config = dict(
type='SNGANDiscResBlock',
in_channels=16,
out_channels=16,
downsample=True,
init_cfg=dict(type='BigGAN'))
def test_snganDiscResBlock(self):
# test default config
block = build_module(self.default_config)
out = block(self.input)
assert out.shape == (2, 16, 5, 5)
# test conv_cfg
config = deepcopy(self.default_config)
config['conv_cfg'] = dict(
kernel_size=1, stride=1, padding=0, act_cfg=None)
block = build_module(config)
out = block(self.input)
assert out.shape == (2, 16, 5, 5)
        # test w/o learnable shortcut + w/o downsample
config = deepcopy(self.default_config)
config['downsample'] = False
config['out_channels'] = 8
block = build_module(config)
out = block(self.input)
assert out.shape == (2, 8, 10, 10)
# test init cfg + w or w/o downsample
for init_method in [
'studio', 'biggan', 'sagan', 'sngan', 'sngan-proj', 'gan-proj'
]:
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type=init_method)
config['out_channels'] = 8
for downsample in [True, False]:
config['downsample'] = downsample
block = build_module(config)
out = block(self.input)
if downsample:
assert out.shape == (2, 8, 5, 5)
else:
assert out.shape == (2, 8, 10, 10)
        # test init_cfg --> raise error
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='wgan-gp')
with pytest.raises(NotImplementedError):
block = build_module(config)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_snganDiscResBlock_cuda(self):
# test default config
block = build_module(self.default_config).cuda()
out = block(self.input.cuda())
assert out.shape == (2, 16, 5, 5)
# test conv_cfg
config = deepcopy(self.default_config)
config['conv_cfg'] = dict(
kernel_size=1, stride=1, padding=0, act_cfg=None)
block = build_module(config).cuda()
out = block(self.input.cuda())
assert out.shape == (2, 16, 5, 5)
        # test w/o learnable shortcut + w/o downsample
config = deepcopy(self.default_config)
config['downsample'] = False
config['out_channels'] = 8
block = build_module(config).cuda()
out = block(self.input.cuda())
assert out.shape == (2, 8, 10, 10)
# test init cfg + w or w/o downsample
for init_method in [
'studio', 'biggan', 'sagan', 'sngan', 'sngan-proj', 'gan-proj'
]:
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type=init_method)
config['out_channels'] = 8
for downsample in [True, False]:
config['downsample'] = downsample
                block = build_module(config).cuda()
                out = block(self.input.cuda())
if downsample:
assert out.shape == (2, 8, 5, 5)
else:
assert out.shape == (2, 8, 10, 10)
class TestSNDiscHeadResBlock(object):
@classmethod
def setup_class(cls):
cls.input = torch.randn(2, 16, 10, 10)
cls.default_config = dict(
type='SNGANDiscHeadResBlock',
in_channels=16,
out_channels=16,
init_cfg=dict(type='BigGAN'))
def test_snganDiscHeadResBlock(self):
# test default config
block = build_module(self.default_config)
out = block(self.input)
assert out.shape == (2, 16, 5, 5)
# test conv_cfg
config = deepcopy(self.default_config)
config['conv_cfg'] = dict(
kernel_size=1, stride=1, padding=0, act_cfg=None)
block = build_module(config)
out = block(self.input)
assert out.shape == (2, 16, 5, 5)
# test init cfg + w or w/o downsample
for init_method in [
'studio', 'biggan', 'sagan', 'sngan', 'sngan-proj', 'gan-proj'
]:
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type=init_method)
block = build_module(config)
out = block(self.input)
assert out.shape == (2, 16, 5, 5)
        # test init_cfg --> raise error
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='wgan-gp')
with pytest.raises(NotImplementedError):
block = build_module(config)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_snganDiscHeadResBlock_cuda(self):
# test default config
block = build_module(self.default_config).cuda()
out = block(self.input.cuda())
assert out.shape == (2, 16, 5, 5)
# test init cfg + w or w/o downsample
for init_method in [
'studio', 'biggan', 'sagan', 'sngan', 'sngan-proj', 'gan-proj'
]:
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type=init_method)
            block = build_module(config).cuda()
            out = block(self.input.cuda())
assert out.shape == (2, 16, 5, 5)
# test conv_cfg
config = deepcopy(self.default_config)
config['conv_cfg'] = dict(
kernel_size=1, stride=1, padding=0, act_cfg=None)
block = build_module(config).cuda()
out = block(self.input.cuda())
assert out.shape == (2, 16, 5, 5)
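# NOTE: `SNConditionNorm` predicts per-class gain and bias via embedding
# layers on top of a plain BN/IN. The two spectral-norm flavours leave
# different buffers behind: the BigGAN-style 'ajbrock' power iteration
# registers `u0`/`sv0`, while PyTorch's built-in utility ('torch') registers
# `weight_u`/`weight_v`/`weight_orig` -- the buffers the asserts check for.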
class TestSNConditionalNorm(object):
@classmethod
def setup_class(cls):
cls.input = torch.randn((2, 4, 4, 4))
cls.label = torch.randint(0, 10, (2, ))
cls.default_config = dict(
type='SNConditionNorm',
in_channels=4,
num_classes=10,
use_cbn=True,
cbn_norm_affine=False,
init_cfg=dict(type='BigGAN'))
def test_conditionalNorm(self):
# test build from default config
norm = build_module(self.default_config)
out = norm(self.input, self.label)
assert out.shape == (2, 4, 4, 4)
# test w/o use_cbn
config = deepcopy(self.default_config)
config['use_cbn'] = False
norm = build_module(config)
out = norm(self.input)
assert out.shape == (2, 4, 4, 4)
        # test num_classes == 0 and use_cbn = False
config = deepcopy(self.default_config)
config['num_classes'] = 0
config['use_cbn'] = False
norm = build_module(config)
out = norm(self.input)
assert out.shape == (2, 4, 4, 4)
        # test num_classes == 0 and use_cbn = True
config = deepcopy(self.default_config)
config['num_classes'] = 0
with pytest.raises(ValueError):
norm = build_module(config)
# test IN
config = deepcopy(self.default_config)
config['norm_cfg'] = dict(type='IN')
norm = build_module(config)
out = norm(self.input, self.label)
assert out.shape == (2, 4, 4, 4)
# test sn_style == ajbrock
config = deepcopy(self.default_config)
config['with_spectral_norm'] = True
config['sn_style'] = 'ajbrock'
norm = build_module(config)
out = norm(self.input, self.label)
for buffer in ['u0', 'sv0']:
assert hasattr(norm.weight_embedding, buffer)
assert hasattr(norm.bias_embedding, buffer)
assert out.shape == (2, 4, 4, 4)
# test sn_style == torch
config = deepcopy(self.default_config)
config['with_spectral_norm'] = True
config['sn_style'] = 'torch'
norm = build_module(config)
out = norm(self.input, self.label)
for buffer in ['weight_u', 'weight_v', 'weight_orig']:
assert hasattr(norm.weight_embedding, buffer)
assert hasattr(norm.bias_embedding, buffer)
assert out.shape == (2, 4, 4, 4)
# test sn_style --> raise error
config = deepcopy(self.default_config)
config['with_spectral_norm'] = True
config['sn_style'] = 'studio'
with pytest.raises(NotImplementedError):
norm = build_module(config)
# test SyncBN
# config = deepcopy(self.default_config)
# config['norm_cfg'] = dict(type='SyncBN')
# norm = build_module(config)
# out = norm(self.input, self.label)
# assert out.shape == (2, 4, 4, 4)
# test unknown norm type
config = deepcopy(self.default_config)
config['norm_cfg'] = dict(type='GN')
with pytest.raises(ValueError):
norm = build_module(config)
# test init_cfg
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sngan')
norm = build_module(config)
out = norm(self.input, self.label)
assert out.shape == (2, 4, 4, 4)
        # test init_cfg --> raise error
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='wgan-gp')
with pytest.raises(NotImplementedError):
norm = build_module(config)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_conditionalNorm_cuda(self):
# test build from default config
norm = build_module(self.default_config).cuda()
out = norm(self.input.cuda(), self.label.cuda())
assert out.shape == (2, 4, 4, 4)
# test w/o use_cbn
config = deepcopy(self.default_config)
config['use_cbn'] = False
norm = build_module(config).cuda()
out = norm(self.input.cuda())
assert out.shape == (2, 4, 4, 4)
        # test num_classes == 0 and use_cbn = False
config = deepcopy(self.default_config)
config['num_classes'] = 0
config['use_cbn'] = False
norm = build_module(config).cuda()
out = norm(self.input.cuda())
assert out.shape == (2, 4, 4, 4)
        # test num_classes == 0 and use_cbn = True
config = deepcopy(self.default_config)
config['num_classes'] = 0
with pytest.raises(ValueError):
norm = build_module(config)
# test IN
config = deepcopy(self.default_config)
config['norm_cfg'] = dict(type='IN')
norm = build_module(config).cuda()
out = norm(self.input.cuda(), self.label.cuda())
assert out.shape == (2, 4, 4, 4)
# test sn_style == ajbrock
config = deepcopy(self.default_config)
config['with_spectral_norm'] = True
config['sn_style'] = 'ajbrock'
        norm = build_module(config).cuda()
        out = norm(self.input.cuda(), self.label.cuda())
for buffer in ['u0', 'sv0']:
assert hasattr(norm.weight_embedding, buffer)
assert hasattr(norm.bias_embedding, buffer)
assert out.shape == (2, 4, 4, 4)
# test sn_style == torch
config = deepcopy(self.default_config)
config['with_spectral_norm'] = True
config['sn_style'] = 'torch'
        norm = build_module(config).cuda()
        out = norm(self.input.cuda(), self.label.cuda())
for buffer in ['weight_u', 'weight_v', 'weight_orig']:
assert hasattr(norm.weight_embedding, buffer)
assert hasattr(norm.bias_embedding, buffer)
assert out.shape == (2, 4, 4, 4)
# test sn_style --> raise error
config = deepcopy(self.default_config)
config['with_spectral_norm'] = True
config['sn_style'] = 'studio'
with pytest.raises(NotImplementedError):
norm = build_module(config)
# test SyncBN
# config = deepcopy(self.default_config)
# config['norm_cfg'] = dict(type='SyncBN')
# norm = build_module(config)
# out = norm(self.input, self.label)
# assert out.shape == (2, 4, 4, 4)
# test unknown norm type
config = deepcopy(self.default_config)
config['norm_cfg'] = dict(type='GN')
with pytest.raises(ValueError):
norm = build_module(config)
# test init_cfg
config = deepcopy(self.default_config)
config['init_cfg'] = dict(type='sngan')
norm = build_module(config).cuda()
out = norm(self.input.cuda(), self.label.cuda())
assert out.shape == (2, 4, 4, 4)
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmgen.models.architectures.stylegan import (StyleGAN1Discriminator,
StyleGANv1Generator)
from mmgen.models.architectures.stylegan.modules.styleganv1_modules import (
AdaptiveInstanceNorm, StyleConv)
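# NOTE: AdaIN (StyleGANv1) instance-normalizes each feature map and applies
# a scale and shift predicted from the style vector, roughly
#     y = gamma(style) * (x - mu) / sigma + beta(style),
# so the spatial shape of the input is preserved.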
class TestAdaptiveInstanceNorm:
@classmethod
def setup_class(cls):
cls.in_channel = 512
cls.style_dim = 512
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_adain_cuda(self):
adain = AdaptiveInstanceNorm(self.in_channel, self.style_dim).cuda()
x = torch.randn((2, 512, 8, 8)).cuda()
style = torch.randn((2, 512)).cuda()
res = adain(x, style)
assert res.shape == (2, 512, 8, 8)
class TestStyleConv:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
in_channels=512,
out_channels=256,
kernel_size=3,
style_channels=512,
padding=1,
initial=False,
blur_kernel=[1, 2, 1],
upsample=True,
fused=False)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_styleconv_cuda(self):
conv = StyleConv(**self.default_cfg).cuda()
input_x = torch.randn((2, 512, 32, 32)).cuda()
input_style1 = torch.randn((2, 512)).cuda()
input_style2 = torch.randn((2, 512)).cuda()
res = conv(input_x, input_style1, input_style2)
assert res.shape == (2, 256, 64, 64)
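# NOTE: `style_mixing` renders a source-by-target grid of images, apparently
# (n_source + 1) * (n_target + 1) samples including the header row/column
# (hence 25 images for n_source=n_target=4 below), and `get_mean_latent`
# averages many sampled style vectors into the truncation latent.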
class TestStyleGAN1Generator:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
out_size=256,
style_channels=512,
num_mlps=8,
blur_kernel=[1, 2, 1],
lr_mlp=0.01,
default_style_mode='mix',
eval_style_mode='single',
mix_prob=0.9)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_g_cuda(self):
# test default config
g = StyleGANv1Generator(**self.default_cfg).cuda()
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
random_noise = g.make_injected_noise()
res = g(
None,
num_batches=1,
injected_noise=random_noise,
randomize_noise=False)
assert res.shape == (1, 3, 256, 256)
res = g(
None, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 256, 256)
styles = [torch.randn((1, 512)).cuda() for _ in range(2)]
res = g(
styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 256, 256)
res = g(
torch.randn,
num_batches=1,
injected_noise=None,
randomize_noise=False)
assert res.shape == (1, 3, 256, 256)
g.eval()
assert g.default_style_mode == 'single'
g.train()
assert g.default_style_mode == 'mix'
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)).cuda() for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 128
g = StyleGANv1Generator(**cfg_).cuda()
res = g(None, num_batches=2)
assert res.shape == (2, 3, 128, 128)
# test generate function
truncation_latent = g.get_mean_latent()
assert truncation_latent.shape == (1, 512)
style_mixing_images = g.style_mixing(
curr_scale=32,
truncation_latent=truncation_latent,
n_source=4,
n_target=4)
assert style_mixing_images.shape == (25, 3, 32, 32)
def test_g_cpu(self):
# test default config
g = StyleGANv1Generator(**self.default_cfg)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
random_noise = g.make_injected_noise()
res = g(
None,
num_batches=1,
injected_noise=random_noise,
randomize_noise=False)
assert res.shape == (1, 3, 256, 256)
res = g(
None, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 256, 256)
styles = [torch.randn((1, 512)) for _ in range(2)]
res = g(
styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 256, 256)
res = g(
torch.randn,
num_batches=1,
injected_noise=None,
randomize_noise=False)
assert res.shape == (1, 3, 256, 256)
g.eval()
assert g.default_style_mode == 'single'
g.train()
assert g.default_style_mode == 'mix'
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)) for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 128
g = StyleGANv1Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 128, 128)
        # test get_mean_latent and style_mixing
truncation_latent = g.get_mean_latent()
assert truncation_latent.shape == (1, 512)
style_mixing_images = g.style_mixing(
curr_scale=32,
truncation_latent=truncation_latent,
n_source=4,
n_target=4)
assert style_mixing_images.shape == (25, 3, 32, 32)
        # set mix_prob to 1.0 and 0.0 to exercise both style-mixing branches
cfg_ = deepcopy(self.default_cfg)
cfg_['mix_prob'] = 1
g = StyleGANv1Generator(**cfg_)
res = g(torch.randn, num_batches=2)
assert res.shape == (2, 3, 256, 256)
cfg_ = deepcopy(self.default_cfg)
cfg_['mix_prob'] = 1
g = StyleGANv1Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
cfg_ = deepcopy(self.default_cfg)
cfg_['mix_prob'] = 0
g = StyleGANv1Generator(**cfg_)
res = g(torch.randn, num_batches=2)
assert res.shape == (2, 3, 256, 256)
cfg_ = deepcopy(self.default_cfg)
cfg_['mix_prob'] = 0
g = StyleGANv1Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
class TestStyleGANv1Disc:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(in_size=64)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_stylegan1_disc_cuda(self):
d = StyleGAN1Discriminator(**self.default_cfg).cuda()
img = torch.randn((2, 3, 64, 64)).cuda()
score = d(img)
assert score.shape == (2, 1)
def test_stylegan1_disc_cpu(self):
d = StyleGAN1Discriminator(**self.default_cfg)
img = torch.randn((2, 3, 64, 64))
score = d(img)
assert score.shape == (2, 1)
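# A hedged end-to-end smoke sketch, not part of the original suite: samples
# from the generator are fed straight to the discriminator, with out_size and
# in_size matched at 64 and all other constructor arguments left at their
# assumed defaults. Named without the test_ prefix so pytest does not collect
# it by default.
def smoke_check_stylegan1_g_to_d():
    g = StyleGANv1Generator(out_size=64)
    d = StyleGAN1Discriminator(in_size=64)
    fake = g(None, num_batches=2)
    score = d(fake)
    assert fake.shape == (2, 3, 64, 64)
    assert score.shape == (2, 1)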
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmgen.models.architectures.stylegan.generator_discriminator_v2 import (
ADAStyleGAN2Discriminator, StyleGAN2Discriminator, StyleGANv2Generator)
from mmgen.models.architectures.stylegan.modules import (Blur,
ModulatedStyleConv,
ModulatedToRGB)
from mmgen.models.architectures.stylegan.mspie import (
MSStyleGAN2Discriminator, MSStyleGANv2Generator)
class TestBlur:
@classmethod
def setup_class(cls):
cls.kernel = [1, 3, 3, 1]
cls.pad = (1, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_blur_cuda(self):
        blur = Blur(self.kernel, self.pad).cuda()
        x = torch.randn((2, 3, 8, 8)).cuda()
res = blur(x)
assert res.shape == (2, 3, 7, 7)
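    # Hedged CPU counterpart (an addition): this assumes the underlying
    # upfirdn2d op ships a CPU path, which the CPU generator tests further
    # below exercise implicitly.
    def test_blur_cpu(self):
        blur = Blur(self.kernel, self.pad)
        x = torch.randn((2, 3, 8, 8))
        res = blur(x)
        assert res.shape == (2, 3, 7, 7)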
class TestModStyleConv:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
in_channels=3,
out_channels=1,
kernel_size=3,
style_channels=5,
upsample=True)
def test_mod_styleconv_cpu(self):
conv = ModulatedStyleConv(**self.default_cfg)
input_x = torch.randn((2, 3, 4, 4))
input_style = torch.randn((2, 5))
res = conv(input_x, input_style)
assert res.shape == (2, 1, 8, 8)
_cfg = deepcopy(self.default_cfg)
_cfg['upsample'] = False
conv = ModulatedStyleConv(**_cfg)
input_x = torch.randn((2, 3, 4, 4))
input_style = torch.randn((2, 5))
res = conv(input_x, input_style)
assert res.shape == (2, 1, 4, 4)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_mod_styleconv_cuda(self):
conv = ModulatedStyleConv(**self.default_cfg).cuda()
input_x = torch.randn((2, 3, 4, 4)).cuda()
input_style = torch.randn((2, 5)).cuda()
res = conv(input_x, input_style)
assert res.shape == (2, 1, 8, 8)
_cfg = deepcopy(self.default_cfg)
_cfg['upsample'] = False
conv = ModulatedStyleConv(**_cfg).cuda()
input_x = torch.randn((2, 3, 4, 4)).cuda()
input_style = torch.randn((2, 5)).cuda()
res = conv(input_x, input_style)
assert res.shape == (2, 1, 4, 4)
class TestToRGB:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(in_channels=5, style_channels=5, out_channels=3)
def test_torgb_cpu(self):
model = ModulatedToRGB(**self.default_cfg)
input_x = torch.randn((2, 5, 4, 4))
style = torch.randn((2, 5))
res = model(input_x, style)
assert res.shape == (2, 3, 4, 4)
input_x = torch.randn((2, 5, 8, 8))
style = torch.randn((2, 5))
        skip = torch.randn((2, 3, 4, 4))
res = model(input_x, style, skip)
assert res.shape == (2, 3, 8, 8)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_torgb_cuda(self):
model = ModulatedToRGB(**self.default_cfg).cuda()
input_x = torch.randn((2, 5, 4, 4)).cuda()
style = torch.randn((2, 5)).cuda()
res = model(input_x, style)
assert res.shape == (2, 3, 4, 4)
input_x = torch.randn((2, 5, 8, 8)).cuda()
style = torch.randn((2, 5)).cuda()
        skip = torch.randn((2, 3, 4, 4)).cuda()
res = model(input_x, style, skip)
assert res.shape == (2, 3, 8, 8)
class TestStyleGAN2Generator:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
out_size=64, style_channels=16, num_mlps=4, channel_multiplier=1)
def test_stylegan2_g_cpu(self):
# test default config
g = StyleGANv2Generator(**self.default_cfg)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
truncation_mean = g.get_mean_latent()
res = g(
None,
num_batches=2,
randomize_noise=False,
truncation=0.7,
truncation_latent=truncation_mean)
assert res.shape == (2, 3, 64, 64)
res = g.style_mixing(2, 2, truncation_latent=truncation_mean)
assert res.shape[2] == 64
random_noise = g.make_injected_noise()
res = g(
None,
num_batches=1,
injected_noise=random_noise,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
styles = [torch.randn((1, 16)) for _ in range(2)]
res = g(
styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
res = g(
torch.randn,
num_batches=1,
injected_noise=None,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
g.eval()
assert g.default_style_mode == 'single'
g.train()
assert g.default_style_mode == 'mix'
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)) for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 256
g = StyleGANv2Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
        # set mix_prob to 1.0 and 0.0 to exercise both style-mixing branches
cfg_ = deepcopy(self.default_cfg)
cfg_['mix_prob'] = 1
g = StyleGANv2Generator(**cfg_)
res = g(torch.randn, num_batches=2)
assert res.shape == (2, 3, 64, 64)
cfg_ = deepcopy(self.default_cfg)
cfg_['mix_prob'] = 1
g = StyleGANv2Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
cfg_ = deepcopy(self.default_cfg)
cfg_['mix_prob'] = 0
g = StyleGANv2Generator(**cfg_)
res = g(torch.randn, num_batches=2)
assert res.shape == (2, 3, 64, 64)
cfg_ = deepcopy(self.default_cfg)
cfg_['mix_prob'] = 0
g = StyleGANv2Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_fp16_stylegan2_g_cuda(self):
g = StyleGANv2Generator(**self.default_cfg, num_fp16_scales=2).cuda()
res = g(None, num_batches=2)
assert res.dtype == torch.float32
g = StyleGANv2Generator(**self.default_cfg, fp16_enabled=True).cuda()
res = g(None, num_batches=2)
assert res.dtype == torch.float32
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_g_cuda(self):
# test default config
g = StyleGANv2Generator(**self.default_cfg).cuda()
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None,
num_batches=1,
injected_noise=random_noise,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
styles = [torch.randn((1, 16)).cuda() for _ in range(2)]
res = g(
styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
res = g(
torch.randn,
num_batches=1,
injected_noise=None,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
g.eval()
assert g.default_style_mode == 'single'
g.train()
assert g.default_style_mode == 'mix'
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)).cuda() for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 256
g = StyleGANv2Generator(**cfg_).cuda()
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
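    # A hedged determinism sketch (an addition, not from the original suite):
    # with a fixed style, fixed injected noise, and randomize_noise=False,
    # eval-mode forward passes are assumed to be reproducible.
    def test_g_deterministic_cpu(self):
        g = StyleGANv2Generator(**self.default_cfg)
        g.eval()
        noise = g.make_injected_noise()
        style = [torch.randn((1, 16))]
        res1 = g(style, injected_noise=noise, randomize_noise=False)
        res2 = g(style, injected_noise=noise, randomize_noise=False)
        assert torch.allclose(res1, res2)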
class TestMSStyleGAN2Generator:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
out_size=64, style_channels=16, num_mlps=4, channel_multiplier=1)
def test_msstylegan2_g_cpu(self):
# test default config
g = MSStyleGANv2Generator(**self.default_cfg)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None,
num_batches=1,
injected_noise=random_noise,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
styles = [torch.randn((1, 16)) for _ in range(2)]
res = g(
styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
res = g(
torch.randn,
num_batches=1,
injected_noise=None,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
g.eval()
assert g.default_style_mode == 'single'
g.train()
assert g.default_style_mode == 'mix'
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)) for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 256
g = MSStyleGANv2Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
g = MSStyleGANv2Generator(deconv2conv=True, **self.default_cfg)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
g = MSStyleGANv2Generator(deconv2conv=True, **self.default_cfg)
truncation_mean = g.get_mean_latent()
res = g(
None,
num_batches=2,
randomize_noise=False,
chosen_scale=2,
truncation=0.7,
truncation_latent=truncation_mean)
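        # 96 = (4 + chosen_scale) * 16: chosen_scale is assumed to widen the
        # 4x4 constant input before the x16 upsampling stack that yields 64.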
assert res.shape == (2, 3, 96, 96)
res = g.style_mixing(2, 2, truncation_latent=truncation_mean)
assert res.shape[2] == 64
g = MSStyleGANv2Generator(
no_pad=True, deconv2conv=True, interp_pad=4, **self.default_cfg)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
g = MSStyleGANv2Generator(
deconv2conv=True, up_after_conv=True, **self.default_cfg)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
g = MSStyleGANv2Generator(
deconv2conv=True, up_after_conv=True, **self.default_cfg)
res = g(None, num_batches=2, chosen_scale=4)
assert res.shape == (2, 3, 128, 128)
g = MSStyleGANv2Generator(
deconv2conv=True, up_after_conv=True, **self.default_cfg)
res = g(None, num_batches=2, chosen_scale=(4, 4))
assert res.shape == (2, 3, 128, 128)
g = MSStyleGANv2Generator(
head_pos_encoding=dict(
type='SPE', embedding_dim=256, padding_idx=0, init_size=128),
**self.default_cfg)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
g = MSStyleGANv2Generator(
head_pos_encoding=dict(
type='SPE', embedding_dim=256, padding_idx=0, init_size=128),
interp_head=True,
**self.default_cfg)
res = g(None, num_batches=1, chosen_scale=(4, 4))
assert res.shape == (1, 3, 128, 128)
g = MSStyleGANv2Generator(
head_pos_encoding=dict(type='CatersianGrid'), **self.default_cfg)
res = g(None, num_batches=2, chosen_scale=(4, 4))
assert res.shape == (2, 3, 128, 128)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_ms_g_cuda(self):
# test default config
g = MSStyleGANv2Generator(**self.default_cfg).cuda()
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None,
num_batches=1,
injected_noise=random_noise,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
styles = [torch.randn((1, 16)).cuda() for _ in range(2)]
res = g(
styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
res = g(
torch.randn,
num_batches=1,
injected_noise=None,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
g.eval()
assert g.default_style_mode == 'single'
g.train()
assert g.default_style_mode == 'mix'
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)).cuda() for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 256
g = MSStyleGANv2Generator(**cfg_).cuda()
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
class TestStyleGANv2Disc:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(in_size=64, channel_multiplier=1)
def test_stylegan2_disc_cpu(self):
d = StyleGAN2Discriminator(**self.default_cfg)
img = torch.randn((2, 3, 64, 64))
score = d(img)
assert score.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_stylegan2_disc_cuda(self):
d = StyleGAN2Discriminator(**self.default_cfg).cuda()
img = torch.randn((2, 3, 64, 64)).cuda()
score = d(img)
assert score.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_fp16_stylegan2_disc_cuda(self):
d = StyleGAN2Discriminator(
**self.default_cfg, num_fp16_scales=2).cuda()
img = torch.randn((2, 3, 64, 64)).cuda()
score = d(img)
assert score.shape == (2, 1)
assert score.dtype == torch.float32
class TestADAStyleGAN2Discriminator:
@classmethod
def setup_class(cls):
aug_kwargs = {
'xflip': 1,
'rotate90': 1,
'xint': 1,
'scale': 1,
'rotate': 1,
'aniso': 1,
'xfrac': 1,
'brightness': 1,
'contrast': 1,
'lumaflip': 1,
'hue': 1,
'saturation': 1
}
cls.default_cfg = dict(
in_size=64,
input_bgr2rgb=True,
data_aug=dict(
type='ADAAug',
update_interval=2,
aug_pipeline=aug_kwargs,
ada_kimg=100))
def test_ada_stylegan2_disc_cpu(self):
d = ADAStyleGAN2Discriminator(**self.default_cfg)
img = torch.randn((2, 3, 64, 64))
score = d(img)
assert score.shape == (2, 1)
# test ada p update
curr_iter = 0
batch_size = 2
score = torch.tensor([1., 1.])
d.ada_aug.log_buffer[0] += 2
d.ada_aug.log_buffer[1] += score.sign().sum()
d.ada_aug.update(iteration=curr_iter, num_batches=batch_size)
assert d.ada_aug.aug_pipeline.p == 0.
curr_iter += 1
d.ada_aug.log_buffer[0] += 2
d.ada_aug.log_buffer[1] += score.sign().sum()
d.ada_aug.update(iteration=curr_iter, num_batches=batch_size)
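        # Expected step under the ADA heuristic (inferred from the config
        # above): sign(rt - target) * num_batches * update_interval
        # / (ada_kimg * 1000) = (2 * 2) / (100 * 1000) = 4e-05.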
assert d.ada_aug.aug_pipeline.p == 4.0000e-05
# test with p=1.
d.ada_aug.aug_pipeline.p.copy_(torch.tensor(1.))
img = torch.randn((2, 3, 64, 64))
score = d(img)
assert score.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_ada_stylegan2_disc_cuda(self):
d = ADAStyleGAN2Discriminator(**self.default_cfg).cuda()
img = torch.randn((2, 3, 64, 64)).cuda()
score = d(img)
assert score.shape == (2, 1)
# test ada p update
curr_iter = 0
batch_size = 2
score = torch.tensor([1., 1.]).cuda()
d.ada_aug.log_buffer[0] += 2
d.ada_aug.log_buffer[1] += score.sign().sum()
d.ada_aug.update(iteration=curr_iter, num_batches=batch_size)
assert d.ada_aug.aug_pipeline.p == 0.
curr_iter += 1
d.ada_aug.log_buffer[0] += 2
d.ada_aug.log_buffer[1] += score.sign().sum()
d.ada_aug.update(iteration=curr_iter, num_batches=batch_size)
assert d.ada_aug.aug_pipeline.p == 4.0000e-05
# test with p=1.
d.ada_aug.aug_pipeline.p.copy_(torch.tensor(1.))
img = torch.randn((2, 3, 64, 64)).cuda()
score = d(img)
assert score.shape == (2, 1)
class TestMSStyleGANv2Disc:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(in_size=64, channel_multiplier=1)
def test_msstylegan2_disc_cpu(self):
d = MSStyleGAN2Discriminator(**self.default_cfg)
img = torch.randn((2, 3, 64, 64))
score = d(img)
assert score.shape == (2, 1)
d = MSStyleGAN2Discriminator(
with_adaptive_pool=True, **self.default_cfg)
img = torch.randn((2, 3, 64, 64))
score = d(img)
assert score.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_msstylegan2_disc_cuda(self):
d = MSStyleGAN2Discriminator(**self.default_cfg).cuda()
img = torch.randn((2, 3, 64, 64)).cuda()
score = d(img)
assert score.shape == (2, 1)
d = MSStyleGAN2Discriminator(
with_adaptive_pool=True, **self.default_cfg).cuda()
img = torch.randn((2, 3, 64, 64)).cuda()
score = d(img)
assert score.shape == (2, 1)
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmgen.models.architectures.stylegan import StyleGANv3Generator
from mmgen.models.architectures.stylegan.modules import (MappingNetwork,
SynthesisInput,
SynthesisLayer,
SynthesisNetwork)
class TestMappingNetwork:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
noise_size=4,
c_dim=0,
style_channels=4,
num_ws=2,
num_layers=2,
lr_multiplier=0.01,
w_avg_beta=0.998)
def test_cpu(self):
module = MappingNetwork(**self.default_cfg)
z = torch.randn([1, 4])
c = None
y = module(z, c)
assert y.shape == (1, 2, 4)
# test update_emas
y = module(z, c, update_emas=True)
assert y.shape == (1, 2, 4)
# test truncation
y = module(z, c, truncation=2)
assert y.shape == (1, 2, 4)
# test with c_dim>0
cfg = deepcopy(self.default_cfg)
cfg.update(c_dim=2)
module = MappingNetwork(**cfg)
z = torch.randn([2, 4])
c = torch.eye(2)
y = module(z, c)
assert y.shape == (2, 2, 4)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_cuda(self):
module = MappingNetwork(**self.default_cfg).cuda()
z = torch.randn([1, 4]).cuda()
c = None
y = module(z, c)
assert y.shape == (1, 2, 4)
# test update_emas
        y = module(z, c, update_emas=True)
        assert y.shape == (1, 2, 4)
        # test truncation
        y = module(z, c, truncation=2)
assert y.shape == (1, 2, 4)
# test with c_dim>0
cfg = deepcopy(self.default_cfg)
cfg.update(c_dim=2)
module = MappingNetwork(**cfg).cuda()
z = torch.randn([2, 4]).cuda()
c = torch.eye(2).cuda()
y = module(z, c)
assert y.shape == (2, 2, 4)
class TestSynthesisInput:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
style_channels=6,
channels=4,
size=8,
sampling_rate=16,
bandwidth=2)
def test_cpu(self):
module = SynthesisInput(**self.default_cfg)
x = torch.randn((2, 6))
y = module(x)
assert y.shape == (2, 4, 8, 8)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_cuda(self):
module = SynthesisInput(**self.default_cfg).cuda()
x = torch.randn((2, 6)).cuda()
y = module(x)
assert y.shape == (2, 4, 8, 8)
class TestSynthesisLayer:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
style_channels=6,
is_torgb=False,
is_critically_sampled=False,
use_fp16=False,
conv_kernel=3,
in_channels=3,
out_channels=3,
in_size=16,
out_size=16,
in_sampling_rate=16,
out_sampling_rate=16,
in_cutoff=2,
out_cutoff=2,
in_half_width=6,
out_half_width=6)
def test_cpu(self):
module = SynthesisLayer(**self.default_cfg)
x = torch.randn((2, 3, 16, 16))
w = torch.randn((2, 6))
y = module(x, w)
assert y.shape == (2, 3, 16, 16)
# test update_emas
y = module(x, w, update_emas=True)
assert y.shape == (2, 3, 16, 16)
# test force_fp32
cfg = deepcopy(self.default_cfg)
cfg.update(use_fp16=True)
module = SynthesisLayer(**cfg)
x = torch.randn((2, 3, 16, 16))
w = torch.randn((2, 6))
y = module(x, w, force_fp32=False)
assert y.shape == (2, 3, 16, 16)
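        # Even with use_fp16=True and force_fp32=False, a CPU run is assumed
        # to fall back to fp32, which is what the dtype check pins down.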
assert y.dtype == torch.float32
# test critically_sampled
cfg = deepcopy(self.default_cfg)
cfg.update(is_critically_sampled=True)
module = SynthesisLayer(**cfg)
x = torch.randn((2, 3, 16, 16))
w = torch.randn((2, 6))
y = module(x, w)
assert y.shape == (2, 3, 16, 16)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_cuda(self):
module = SynthesisLayer(**self.default_cfg).cuda()
x = torch.randn((2, 3, 16, 16)).cuda()
w = torch.randn((2, 6)).cuda()
y = module(x, w)
assert y.shape == (2, 3, 16, 16)
# test update_emas
        y = module(x, w, update_emas=True)
assert y.shape == (2, 3, 16, 16)
# test critically_sampled
cfg = deepcopy(self.default_cfg)
cfg.update(is_critically_sampled=True)
module = SynthesisLayer(**cfg).cuda()
x = torch.randn((2, 3, 16, 16)).cuda()
w = torch.randn((2, 6)).cuda()
y = module(x, w)
assert y.shape == (2, 3, 16, 16)
class TestSynthesisNetwork:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
style_channels=8, out_size=16, img_channels=3, num_layers=4)
def test_cpu(self):
module = SynthesisNetwork(**self.default_cfg)
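        # The 6 in the w batch below is assumed to be num_ws = num_layers + 2
        # (the extra two covering the synthesis input and the ToRGB layer).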
ws = torch.randn((2, 6, 8))
y = module(ws)
assert y.shape == (2, 3, 16, 16)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_cuda(self):
module = SynthesisNetwork(**self.default_cfg).cuda()
ws = torch.randn((2, 6, 8)).cuda()
y = module(ws)
assert y.shape == (2, 3, 16, 16)
class TestStyleGAN3Generator:
@classmethod
def setup_class(cls):
synthesis_cfg = {
'type': 'SynthesisNetwork',
'channel_base': 1024,
'channel_max': 16,
'magnitude_ema_beta': 0.999
}
cls.default_cfg = dict(
noise_size=6,
style_channels=8,
out_size=16,
img_channels=3,
synthesis_cfg=synthesis_cfg)
synthesis_r_cfg = {
'type': 'SynthesisNetwork',
'channel_base': 1024,
'channel_max': 16,
'magnitude_ema_beta': 0.999,
'conv_kernel': 1,
'use_radial_filters': True
}
cls.s3_r_cfg = dict(
noise_size=6,
style_channels=8,
out_size=16,
img_channels=3,
synthesis_cfg=synthesis_r_cfg)
def test_cpu(self):
generator = StyleGANv3Generator(**self.default_cfg)
z = torch.randn((2, 6))
c = None
y = generator(z, c)
assert y.shape == (2, 3, 16, 16)
y = generator(None, num_batches=2)
assert y.shape == (2, 3, 16, 16)
res = generator(torch.randn, num_batches=1)
assert res.shape == (1, 3, 16, 16)
cfg = deepcopy(self.default_cfg)
cfg.update(dict(rgb2bgr=True))
generator = StyleGANv3Generator(**cfg)
y = generator(None, num_batches=2)
assert y.shape == (2, 3, 16, 16)
# test return latents
result = generator(None, num_batches=2, return_latents=True)
assert isinstance(result, dict)
assert result['fake_img'].shape == (2, 3, 16, 16)
assert result['noise_batch'].shape == (2, 6)
assert result['latent'].shape == (2, 16, 8)
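        # 'latent' is the broadcast per-layer w: 16 copies of the 8-dim style,
        # 16 being num_ws for the default synthesis depth (an assumption).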
# test input_is_latent
result = generator(
None, num_batches=2, input_is_latent=True, return_latents=True)
assert isinstance(result, dict)
assert result['fake_img'].shape == (2, 3, 16, 16)
assert result['noise_batch'].shape == (2, 8)
assert result['latent'].shape == (2, 16, 8)
generator = StyleGANv3Generator(**self.s3_r_cfg)
z = torch.randn((2, 6))
c = None
y = generator(z, c)
assert y.shape == (2, 3, 16, 16)
y = generator(None, num_batches=2)
assert y.shape == (2, 3, 16, 16)
res = generator(torch.randn, num_batches=1)
assert res.shape == (1, 3, 16, 16)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_cuda(self):
generator = StyleGANv3Generator(**self.default_cfg).cuda()
z = torch.randn((2, 6)).cuda()
c = None
y = generator(z, c)
assert y.shape == (2, 3, 16, 16)
res = generator(torch.randn, num_batches=1)
assert res.shape == (1, 3, 16, 16)
cfg = deepcopy(self.default_cfg)
cfg.update(dict(rgb2bgr=True))
generator = StyleGANv3Generator(**cfg).cuda()
y = generator(None, num_batches=2)
assert y.shape == (2, 3, 16, 16)
generator = StyleGANv3Generator(**self.s3_r_cfg).cuda()
z = torch.randn((2, 6)).cuda()
c = None
y = generator(z, c)
assert y.shape == (2, 3, 16, 16)
res = generator(torch.randn, num_batches=1)
assert res.shape == (1, 3, 16, 16)