Commit 1401de15 authored by dongchy920's avatar dongchy920
Browse files

stylegan2_mmcv

parents
Pipeline #1274 canceled with stages
# Default runtime settings for iteration-based GAN training.

# Save a checkpoint every 10k iterations (iteration-based, not epoch-based).
checkpoint_config = {'interval': 10000, 'by_epoch': False}

# yapf:disable
# Text logging every 100 iterations; the Tensorboard hook is left disabled.
log_config = {
    'interval': 100,
    'hooks': [
        {'type': 'TextLoggerHook'},
        # {'type': 'TensorboardLoggerHook'},
    ],
}
# yapf:enable

# Periodically dump generated samples so training progress can be inspected.
custom_hooks = [{
    'type': 'VisualizeUnconditionalSamples',
    'output_dir': 'training_samples',
    'interval': 1000,
}]

# use dynamic runner
runner = {
    'type': 'DynamicIterBasedRunner',
    'is_dynamic_ddp': True,
    'pass_training_status': True,
}

dist_params = {'backend': 'nccl'}
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 10000)]
find_unused_parameters = True
cudnn_benchmark = True
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
# BigGAN for 128x128 conditional generation over 1000 classes.
model = {
    'type': 'BasiccGAN',
    'generator': {
        'type': 'BigGANGenerator',
        'output_scale': 128,
        'noise_size': 120,
        'num_classes': 1000,
        'base_channels': 96,
        'shared_dim': 128,
        'with_shared_embedding': True,
        'sn_eps': 1e-6,
        'init_type': 'ortho',
        'act_cfg': {'type': 'ReLU', 'inplace': True},
        'split_noise': True,
        'auto_sync_bn': False,
    },
    'discriminator': {
        'type': 'BigGANDiscriminator',
        'input_scale': 128,
        'num_classes': 1000,
        'base_channels': 96,
        'sn_eps': 1e-6,
        'init_type': 'ortho',
        'act_cfg': {'type': 'ReLU', 'inplace': True},
        'with_spectral_norm': True,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'hinge'},
}
# 8 discriminator steps per generator step, with gradient accumulation.
train_cfg = {
    'disc_steps': 8,
    'gen_steps': 1,
    'batch_accumulation_steps': 8,
    'use_ema': True,
}
test_cfg = None
# Discriminator uses a 4x larger learning rate than the generator.
optimizer = {
    'generator': {
        'type': 'Adam', 'lr': 0.0001, 'betas': (0.0, 0.999), 'eps': 1e-6,
    },
    'discriminator': {
        'type': 'Adam', 'lr': 0.0004, 'betas': (0.0, 0.999), 'eps': 1e-6,
    },
}

# Smaller BigGAN variant for 32x32 / 10-class data (rebinds the names above).
model = {
    'type': 'BasiccGAN',
    'num_classes': 10,
    'generator': {
        'type': 'BigGANGenerator',
        'output_scale': 32,
        'noise_size': 128,
        'num_classes': 10,
        'base_channels': 64,
        'with_shared_embedding': False,
        'sn_eps': 1e-8,
        'sn_style': 'torch',
        'init_type': 'N02',
        'split_noise': False,
        'auto_sync_bn': False,
    },
    'discriminator': {
        'type': 'BigGANDiscriminator',
        'input_scale': 32,
        'num_classes': 10,
        'base_channels': 64,
        'sn_eps': 1e-8,
        'sn_style': 'torch',
        'init_type': 'N02',
        'with_spectral_norm': True,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'hinge'},
}
train_cfg = {
    'disc_steps': 4,
    'gen_steps': 1,
    'batch_accumulation_steps': 1,
    'use_ema': True,
}
test_cfg = None
optimizer = {
    'generator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.0, 0.999)},
    'discriminator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.0, 0.999)},
}
_domain_a = None # set by user
_domain_b = None # set by user
model = dict(
type='CycleGAN',
generator=dict(
type='ResnetGenerator',
in_channels=3,
out_channels=3,
base_channels=64,
norm_cfg=dict(type='IN'),
use_dropout=False,
num_blocks=9,
padding_mode='reflect',
init_cfg=dict(type='normal', gain=0.02)),
discriminator=dict(
type='PatchDiscriminator',
in_channels=3,
base_channels=64,
num_conv=3,
norm_cfg=dict(type='IN'),
init_cfg=dict(type='normal', gain=0.02)),
gan_loss=dict(
type='GANLoss',
gan_type='lsgan',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=1.0),
default_domain=None, # set by user
reachable_domains=None, # set by user
related_domains=None, # set by user
gen_auxiliary_loss=[
dict(
type='L1Loss',
loss_weight=10.0,
loss_name='cycle_loss',
data_info=dict(
pred=f'cycle_{_domain_a}', target=f'real_{_domain_a}'),
reduction='mean'),
dict(
type='L1Loss',
loss_weight=10.0,
loss_name='cycle_loss',
data_info=dict(
pred=f'cycle_{_domain_b}',
target=f'real_{_domain_b}',
),
reduction='mean'),
dict(
type='L1Loss',
loss_weight=0.5,
loss_name='id_loss',
data_info=dict(
pred=f'identity_{_domain_a}', target=f'real_{_domain_a}'),
reduction='mean'),
dict(
type='L1Loss',
loss_weight=0.5,
loss_name='id_loss',
data_info=dict(
pred=f'identity_{_domain_b}', target=f'real_{_domain_b}'),
reduction='mean')
])
train_cfg = dict(buffer_size=50)
test_cfg = None
# DCGAN for 128x128 unconditional generation.
model = {
    'type': 'StaticUnconditionalGAN',
    'generator': {
        'type': 'DCGANGenerator',
        'output_scale': 128,
        'base_channels': 1024,
    },
    'discriminator': {
        'type': 'DCGANDiscriminator',
        'input_scale': 128,
        'output_scale': 4,
        'out_channels': 100,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'vanilla'},
}
train_cfg = {'disc_steps': 1}
test_cfg = None
# Classic DCGAN Adam settings (lr 2e-4, beta1 0.5) for both networks.
optimizer = {
    'generator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
    'discriminator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
}
# 64x64 DCGAN variant (rebinds the names above).
model = {
    'type': 'StaticUnconditionalGAN',
    'generator': {
        'type': 'DCGANGenerator',
        'output_scale': 64,
        'base_channels': 1024,
    },
    'discriminator': {
        'type': 'DCGANDiscriminator',
        'input_scale': 64,
        'output_scale': 4,
        'out_channels': 1,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'vanilla'},
}
train_cfg = {'disc_steps': 1}
test_cfg = None
optimizer = {
    'generator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
    'discriminator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
}
# Gaussian diffusion (DDPM) on 32x32 images: 4000 timesteps, cosine beta
# schedule, epsilon-prediction mean with a learned variance range.
model = {
    'type': 'BasicGaussianDiffusion',
    'num_timesteps': 4000,
    'betas_cfg': {'type': 'cosine'},
    'denoising': {
        'type': 'DenoisingUnet',
        'image_size': 32,
        'in_channels': 3,
        'base_channels': 128,
        'resblocks_per_downsample': 3,
        'attention_res': [16, 8],
        'use_scale_shift_norm': True,
        'dropout': 0.3,
        'num_heads': 4,
        'use_rescale_timesteps': True,
        'output_cfg': {'mean': 'eps', 'var': 'learned_range'},
    },
    'timestep_sampler': {'type': 'UniformTimeStepSampler'},
    'ddpm_loss': [
        {
            'type': 'DDPMVLBLoss',
            'rescale_mode': 'constant',
            # Constant rescale matching the 4000-step schedule against the
            # conventional 1000 steps.
            'rescale_cfg': {'scale': 4000 / 1000},
            'data_info': {
                'mean_pred': 'mean_pred',
                'mean_target': 'mean_posterior',
                'logvar_pred': 'logvar_pred',
                'logvar_target': 'logvar_posterior',
            },
            'log_cfgs': [
                {
                    'type': 'quartile',
                    'prefix_name': 'loss_vlb',
                    'total_timesteps': 4000,
                },
                {'type': 'name'},
            ],
        },
        {
            'type': 'DDPMMSELoss',
            'log_cfgs': {
                'type': 'quartile',
                'prefix_name': 'loss_mse',
                'total_timesteps': 4000,
            },
        },
    ],
}
train_cfg = {'use_ema': True, 'real_img_key': 'img'}
test_cfg = None
optimizer = {'denoising': {'type': 'AdamW', 'lr': 1e-4, 'weight_decay': 0}}

# Same diffusion setup for 64x64 images with dropout disabled
# (rebinds the names above).
model = {
    'type': 'BasicGaussianDiffusion',
    'num_timesteps': 4000,
    'betas_cfg': {'type': 'cosine'},
    'denoising': {
        'type': 'DenoisingUnet',
        'image_size': 64,
        'in_channels': 3,
        'base_channels': 128,
        'resblocks_per_downsample': 3,
        'attention_res': [16, 8],
        'use_scale_shift_norm': True,
        'dropout': 0,
        'num_heads': 4,
        'use_rescale_timesteps': True,
        'output_cfg': {'mean': 'eps', 'var': 'learned_range'},
    },
    'timestep_sampler': {'type': 'UniformTimeStepSampler'},
    'ddpm_loss': [
        {
            'type': 'DDPMVLBLoss',
            'rescale_mode': 'constant',
            'rescale_cfg': {'scale': 4000 / 1000},
            'data_info': {
                'mean_pred': 'mean_pred',
                'mean_target': 'mean_posterior',
                'logvar_pred': 'logvar_pred',
                'logvar_target': 'logvar_posterior',
            },
            'log_cfgs': [
                {
                    'type': 'quartile',
                    'prefix_name': 'loss_vlb',
                    'total_timesteps': 4000,
                },
                {'type': 'name'},
            ],
        },
        {
            'type': 'DDPMMSELoss',
            'log_cfgs': {
                'type': 'quartile',
                'prefix_name': 'loss_mse',
                'total_timesteps': 4000,
            },
        },
    ],
}
train_cfg = {'use_ema': True, 'real_img_key': 'img'}
test_cfg = None
optimizer = {'denoising': {'type': 'AdamW', 'lr': 1e-4, 'weight_decay': 0}}
# LSGAN for 128x128 unconditional generation (least-squares adversarial loss).
model = {
    'type': 'StaticUnconditionalGAN',
    'generator': {
        'type': 'LSGANGenerator',
        'output_scale': 128,
        'base_channels': 256,
        'noise_size': 1024,
    },
    'discriminator': {
        'type': 'LSGANDiscriminator',
        'input_scale': 128,
        'base_channels': 64,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'lsgan'},
}
train_cfg = {'disc_steps': 1}
test_cfg = None
# Same Adam settings for both networks.
optimizer = {
    'generator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
    'discriminator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
}
# Progressive-growing GAN (PGGAN) up to 1024x1024, WGAN adversarial loss
# plus discriminator-shift and gradient-penalty auxiliary losses.
model = {
    'type': 'ProgressiveGrowingGAN',
    'generator': {
        'type': 'PGGANGenerator',
        'out_scale': 1024,
        'noise_size': 512,
    },
    'discriminator': {'type': 'PGGANDiscriminator', 'in_scale': 1024},
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'wgan'},
    'disc_auxiliary_loss': [
        {
            'type': 'DiscShiftLoss',
            'loss_weight': 0.001 * 0.5,
            'data_info': {'pred': 'disc_pred_fake'},
        },
        {
            'type': 'DiscShiftLoss',
            'loss_weight': 0.001 * 0.5,
            'data_info': {'pred': 'disc_pred_real'},
        },
        {
            'type': 'GradientPenaltyLoss',
            'loss_weight': 10,
            'norm_mode': 'HWC',
            'data_info': {
                'discriminator': 'disc_partial',
                'real_data': 'real_imgs',
                'fake_data': 'fake_imgs',
            },
        },
    ],
}
# Training budget per resolution in thousands of images; the final 1024
# scale trains 10x longer than the intermediate scales. Per-resolution
# learning-rate overrides kick in from 128 upward.
train_cfg = {
    'use_ema': True,
    'nkimgs_per_scale': {
        '4': 600,
        '8': 1200,
        '16': 1200,
        '32': 1200,
        '64': 1200,
        '128': 1200,
        '256': 1200,
        '512': 1200,
        '1024': 12000,
    },
    'transition_kimgs': 600,
    'optimizer_cfg': {
        'generator': {'type': 'Adam', 'lr': 0.001, 'betas': (0., 0.99)},
        'discriminator': {'type': 'Adam', 'lr': 0.001, 'betas': (0., 0.99)},
    },
    'g_lr_base': 0.001,
    'd_lr_base': 0.001,
    'g_lr_schedule': {
        '128': 0.0015,
        '256': 0.002,
        '512': 0.003,
        '1024': 0.003,
    },
    'd_lr_schedule': {
        '128': 0.0015,
        '256': 0.002,
        '512': 0.003,
        '1024': 0.003,
    },
}
test_cfg = None
# 128x128 PGGAN variant (rebinds the names above).
model = {
    'type': 'ProgressiveGrowingGAN',
    'generator': {
        'type': 'PGGANGenerator',
        'out_scale': 128,
        'noise_size': 512,
    },
    'discriminator': {'type': 'PGGANDiscriminator', 'in_scale': 128},
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'wgan'},
    'disc_auxiliary_loss': [
        {
            'type': 'DiscShiftLoss',
            'loss_weight': 0.001 * 0.5,
            'data_info': {'pred': 'disc_pred_fake'},
        },
        {
            'type': 'DiscShiftLoss',
            'loss_weight': 0.001 * 0.5,
            'data_info': {'pred': 'disc_pred_real'},
        },
        {
            'type': 'GradientPenaltyLoss',
            'loss_weight': 10,
            'norm_mode': 'HWC',
            'data_info': {
                'discriminator': 'disc_partial',
                'real_data': 'real_imgs',
                'fake_data': 'fake_imgs',
            },
        },
    ],
}
train_cfg = {
    'use_ema': True,
    'nkimgs_per_scale': {
        '4': 600,
        '8': 1200,
        '16': 1200,
        '32': 1200,
        '64': 1200,
        '128': 12000,
    },
    'transition_kimgs': 600,
    'optimizer_cfg': {
        'generator': {'type': 'Adam', 'lr': 0.001, 'betas': (0., 0.99)},
        'discriminator': {'type': 'Adam', 'lr': 0.001, 'betas': (0., 0.99)},
    },
    'g_lr_base': 0.001,
    'd_lr_base': 0.001,
    'g_lr_schedule': {'128': 0.0015},
    'd_lr_schedule': {'128': 0.0015},
}
test_cfg = None
# Pix2Pix base config. Domain names are placeholders (None here) to be set
# by the concrete config; note the f-strings below are evaluated at import
# time, so with None they yield keys like 'fake_None'.
source_domain = None  # set by user
target_domain = None  # set by user
# model settings
model = {
    'type': 'Pix2Pix',
    'generator': {
        'type': 'UnetGenerator',
        'in_channels': 3,
        'out_channels': 3,
        'num_down': 8,
        'base_channels': 64,
        'norm_cfg': {'type': 'BN'},
        'use_dropout': True,
        'init_cfg': {'type': 'normal', 'gain': 0.02},
    },
    'discriminator': {
        'type': 'PatchDiscriminator',
        # 6 input channels — presumably a pair of 3-channel images
        # concatenated for the conditional discriminator; confirm against
        # the Pix2Pix model implementation.
        'in_channels': 6,
        'base_channels': 64,
        'num_conv': 3,
        'norm_cfg': {'type': 'BN'},
        'init_cfg': {'type': 'normal', 'gain': 0.02},
    },
    'gan_loss': {
        'type': 'GANLoss',
        'gan_type': 'vanilla',
        'real_label_val': 1.0,
        'fake_label_val': 0.0,
        'loss_weight': 1.0,
    },
    'default_domain': target_domain,
    'reachable_domains': [target_domain],
    'related_domains': [target_domain, source_domain],
    # L1 pixel loss (weight 100) between translated and real target images.
    'gen_auxiliary_loss': {
        'type': 'L1Loss',
        'loss_weight': 100.0,
        'loss_name': 'pixel_loss',
        'data_info': {
            'pred': f'fake_{target_domain}',
            'target': f'real_{target_domain}',
        },
        'reduction': 'mean',
    },
}
# model training and testing settings
train_cfg = None
test_cfg = None
# SAGAN for 128x128 conditional generation: self-attention in both networks,
# spectral norm, hinge loss.
model = {
    'type': 'BasiccGAN',
    'generator': {
        'type': 'SAGANGenerator',
        'output_scale': 128,
        'base_channels': 64,
        'attention_cfg': {'type': 'SelfAttentionBlock'},
        'attention_after_nth_block': 4,
        'with_spectral_norm': True,
    },
    'discriminator': {
        'type': 'ProjDiscriminator',
        'input_scale': 128,
        'base_channels': 64,
        'attention_cfg': {'type': 'SelfAttentionBlock'},
        'attention_after_nth_block': 1,
        'with_spectral_norm': True,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'hinge'},
}
train_cfg = {'disc_steps': 1}
test_cfg = None
# Discriminator uses a 4x larger learning rate than the generator.
optimizer = {
    'generator': {'type': 'Adam', 'lr': 0.0001, 'betas': (0.0, 0.999)},
    'discriminator': {'type': 'Adam', 'lr': 0.0004, 'betas': (0.0, 0.999)},
}
# 32x32 SAGAN variant (rebinds the names above).
model = {
    'type': 'BasiccGAN',
    'generator': {
        'type': 'SAGANGenerator',
        'output_scale': 32,
        'base_channels': 256,
        'attention_cfg': {'type': 'SelfAttentionBlock'},
        'attention_after_nth_block': 2,
        'with_spectral_norm': True,
    },
    'discriminator': {
        'type': 'ProjDiscriminator',
        'input_scale': 32,
        'base_channels': 128,
        'attention_cfg': {'type': 'SelfAttentionBlock'},
        'attention_after_nth_block': 1,
        'with_spectral_norm': True,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'hinge'},
}
train_cfg = {'disc_steps': 5}
test_cfg = None
optimizer = {
    'generator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
    'discriminator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
}
# SinGAN: multi-scale generator/discriminator pyramid. The number of scales
# depends on the training image and must be filled in by the concrete config.
model = {
    'type': 'SinGAN',
    'generator': {
        'type': 'SinGANMultiScaleGenerator',
        'in_channels': 3,
        'out_channels': 3,
        'num_scales': None,  # need to be specified
    },
    'discriminator': {
        'type': 'SinGANMultiScaleDiscriminator',
        'in_channels': 3,
        'num_scales': None,  # need to be specified
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'wgan', 'loss_weight': 1},
    'disc_auxiliary_loss': [
        {
            'type': 'GradientPenaltyLoss',
            'loss_weight': 0.1,
            'norm_mode': 'pixel',
            'data_info': {
                'discriminator': 'disc_partial',
                'real_data': 'real_imgs',
                'fake_data': 'fake_imgs',
            },
        },
    ],
    # MSE reconstruction loss between reconstructed and real images.
    'gen_auxiliary_loss': {
        'type': 'MSELoss',
        'loss_weight': 10,
        'data_info': {'pred': 'recon_imgs', 'target': 'real_imgs'},
    },
}
# Per-scale training schedule: 2000 iterations per scale with an LR drop
# at iteration 1600.
train_cfg = {
    'noise_weight_init': 0.1,
    'iters_per_scale': 2000,
    'curr_scale': -1,
    'disc_steps': 3,
    'generator_steps': 3,
    'lr_d': 0.0005,
    'lr_g': 0.0005,
    'lr_scheduler_args': {'milestones': [1600], 'gamma': 0.1},
}
test_cfg = None
# SNGAN with projection discriminator at 128x128, hinge loss.
model = {
    'type': 'BasiccGAN',
    'generator': {
        'type': 'SNGANGenerator',
        'output_scale': 128,
        'base_channels': 64,
    },
    'discriminator': {
        'type': 'ProjDiscriminator',
        'input_scale': 128,
        'base_channels': 64,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'hinge'},
}
train_cfg = {'disc_steps': 2}
test_cfg = None
optimizer = {
    'generator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
    'discriminator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
}
# 32x32 SNGAN variant (rebinds the names above).
model = {
    'type': 'BasiccGAN',
    'generator': {
        'type': 'SNGANGenerator',
        'output_scale': 32,
        'base_channels': 256,
    },
    'discriminator': {
        'type': 'ProjDiscriminator',
        'input_scale': 32,
        'base_channels': 128,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'hinge'},
}
train_cfg = {'disc_steps': 5}
test_cfg = None
optimizer = {
    'generator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
    'discriminator': {'type': 'Adam', 'lr': 0.0002, 'betas': (0.5, 0.999)},
}
# StyleGANv2. The R1 penalty runs every d_reg_interval discriminator steps
# and the path-length regularizer every g_reg_interval generator steps; the
# loss weights and Adam hyper-parameters are rescaled by the corresponding
# interval/(interval + 1) ratios.
d_reg_interval = 16
g_reg_interval = 4
g_reg_ratio = g_reg_interval / (g_reg_interval + 1)
d_reg_ratio = d_reg_interval / (d_reg_interval + 1)
model = {
    'type': 'StaticUnconditionalGAN',
    'generator': {
        'type': 'StyleGANv2Generator',
        'out_size': None,  # Need to be set.
        'style_channels': 512,
    },
    'discriminator': {
        'type': 'StyleGAN2Discriminator',
        'in_size': None,  # Need to be set.
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'wgan-logistic-ns'},
    'disc_auxiliary_loss': {
        'type': 'R1GradientPenalty',
        'loss_weight': 10. / 2. * d_reg_interval,
        'interval': d_reg_interval,
        'norm_mode': 'HWC',
        'data_info': {'real_data': 'real_imgs', 'discriminator': 'disc'},
    },
    'gen_auxiliary_loss': {
        'type': 'GeneratorPathRegularizer',
        'loss_weight': 2. * g_reg_interval,
        'pl_batch_shrink': 2,
        'interval': g_reg_interval,
        'data_info': {'generator': 'gen', 'num_batches': 'batch_size'},
    },
}
train_cfg = {'use_ema': True}
test_cfg = None
# Optimizer hyper-parameters scaled by the regularization interval ratios.
optimizer = {
    'generator': {
        'type': 'Adam',
        'lr': 0.002 * g_reg_ratio,
        'betas': (0, 0.99**g_reg_ratio),
    },
    'discriminator': {
        'type': 'Adam',
        'lr': 0.002 * d_reg_ratio,
        'betas': (0, 0.99**d_reg_ratio),
    },
}
# StyleGANv3 generator paired with a StyleGAN2 discriminator. As in the
# StyleGANv2 setup, R1 and path-length regularizers run on intervals and
# the loss weights / optimizer hyper-parameters are rescaled by
# interval/(interval + 1).
d_reg_interval = 16
g_reg_interval = 4
g_reg_ratio = g_reg_interval / (g_reg_interval + 1)
d_reg_ratio = d_reg_interval / (d_reg_interval + 1)
model = {
    'type': 'StaticUnconditionalGAN',
    'generator': {
        'type': 'StyleGANv3Generator',
        'noise_size': 512,
        'style_channels': 512,
        'out_size': None,  # Need to be set.
        'img_channels': 3,
    },
    'discriminator': {
        'type': 'StyleGAN2Discriminator',
        'in_size': None,  # Need to be set.
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'wgan-logistic-ns'},
    'disc_auxiliary_loss': {
        'type': 'R1GradientPenalty',
        'loss_weight': 10. / 2. * d_reg_interval,
        'interval': d_reg_interval,
        'norm_mode': 'HWC',
        'data_info': {'real_data': 'real_imgs', 'discriminator': 'disc'},
    },
    'gen_auxiliary_loss': {
        'type': 'GeneratorPathRegularizer',
        'loss_weight': 2. * g_reg_interval,
        'pl_batch_shrink': 2,
        'interval': g_reg_interval,
        'data_info': {'generator': 'gen', 'num_batches': 'batch_size'},
    },
}
train_cfg = {'use_ema': True}
test_cfg = None
# Generator base LR is 0.0025 here (vs 0.002 for the discriminator).
optimizer = {
    'generator': {
        'type': 'Adam',
        'lr': 0.0025 * g_reg_ratio,
        'betas': (0, 0.99**g_reg_ratio),
    },
    'discriminator': {
        'type': 'Adam',
        'lr': 0.002 * d_reg_ratio,
        'betas': (0, 0.99**d_reg_ratio),
    },
}
# StyleGAN v1 model and progressive-growing training schedule.
model = dict(
    type='StyleGANV1',
    generator=dict(
        type='StyleGANv1Generator', out_size=None, style_channels=512),
    discriminator=dict(type='StyleGAN1Discriminator', in_size=None),
    gan_loss=dict(type='GANLoss', gan_type='wgan-logistic-ns'),
    disc_auxiliary_loss=[
        dict(
            type='R1GradientPenalty',
            loss_weight=10,
            norm_mode='HWC',
            data_info=dict(
                discriminator='disc_partial', real_data='real_imgs'))
    ])
train_cfg = dict(
    use_ema=True,
    transition_kimgs=600,
    optimizer_cfg=dict(
        generator=dict(type='Adam', lr=0.001, betas=(0.0, 0.99)),
        discriminator=dict(type='Adam', lr=0.001, betas=(0.0, 0.99))),
    g_lr_base=0.001,
    d_lr_base=0.001,
    # Per-resolution learning-rate overrides. Plain dict literals: the
    # previous ``dict({...})`` form redundantly wrapped a literal in a
    # dict() call (required anyway since keys like '128' are not valid
    # keyword arguments).
    g_lr_schedule={
        '128': 0.0015,
        '256': 0.002,
        '512': 0.003,
        '1024': 0.003
    },
    d_lr_schedule={
        '128': 0.0015,
        '256': 0.002,
        '512': 0.003,
        '1024': 0.003
    })
test_cfg = None
optimizer = None
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment