Commit 1401de15 authored by dongchy920's avatar dongchy920
Browse files

stylegan2_mmcv

parents
Pipeline #1274 canceled with stages
# MMGeneration config: DCGAN on LSUN-Bedroom at 64x64 resolution.
# Model, dataset pipeline and runtime defaults are inherited from `_base_`.
_base_ = [
'../_base_/models/dcgan/dcgan_64x64.py',
'../_base_/datasets/unconditional_imgs_64x64.py',
'../_base_/default_runtime.py'
]
# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
samples_per_gpu=128, train=dict(imgs_root='data/lsun/bedroom_train'))
# adjust running config
# no LR schedule: the optimizer learning rate stays constant for the whole run
lr_config = None
checkpoint_config = dict(interval=100000, by_epoch=False)
# periodically dump generated samples for visual inspection
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=10000)
]
# iteration-based training length (pairs with `by_epoch=False` above)
total_iters = 1500002
# metrics for offline evaluation (MS-SSIM and sliced Wasserstein distance)
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 64, 64)))
# MMGeneration config: DCGAN on MNIST (single-channel images) at 64x64.
# Model, dataset pipeline and runtime defaults are inherited from `_base_`.
_base_ = [
'../_base_/models/dcgan/dcgan_64x64.py',
'../_base_/datasets/unconditional_imgs_64x64.py',
'../_base_/default_runtime.py'
]
# output single channel
model = dict(generator=dict(out_channels=1), discriminator=dict(in_channels=1))
# define dataset
# modify train_pipeline to load gray scale images
train_pipeline = [
dict(
type='LoadImageFromFile',
key='real_img',
flag='grayscale',
io_backend='disk'),
dict(type='Resize', keys=['real_img'], scale=(64, 64)),
dict(
type='Normalize',
keys=['real_img'],
# (x - 127.5) / 127.5 maps pixel values from [0, 255] into [-1, 1]
mean=[127.5],
std=[127.5],
to_rgb=False),
dict(type='ImageToTensor', keys=['real_img']),
dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
samples_per_gpu=128,
train=dict(imgs_root='data/mnist_64/train', pipeline=train_pipeline),
val=None)
# adjust running config
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=500, by_epoch=False)
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=100)
]
log_config = dict(
interval=100, hooks=[
dict(type='TextLoggerHook'),
])
# iteration-based training length
total_iters = 5000
# NOTE(review): SWD `image_shape` is 3-channel while the model above outputs
# a single channel — confirm this is intended.
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 64, 64)))
# separate Adam optimizers for generator and discriminator
# (generator uses a 4x larger learning rate than the discriminator)
optimizer = dict(
generator=dict(type='Adam', lr=0.0004, betas=(0.5, 0.999)),
discriminator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.999)))
Collections:
- Metadata:
Architecture:
- Unsupervised Representation Learning with Deep Convolutional Generative Adversarial
Networks
Name: Unsupervised Representation Learning with Deep Convolutional Generative Adversarial
Networks
Paper:
- https://arxiv.org/abs/1511.06434
README: configs/dcgan/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/dcgan/dcgan_mnist-64_b128x1_Glr4e-4_Dlr1e-4_5k.py
In Collection: Unsupervised Representation Learning with Deep Convolutional Generative
Adversarial Networks
Metadata:
Training Data: Others
Name: dcgan_mnist-64_b128x1_Glr4e-4_Dlr1e-4_5k
Results:
- Dataset: Others
Metrics:
MS-SSIM: 0.1395
SWD: 21.16, 4.4, 8.41/11.32
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/dcgan/dcgan_mnist-64_b128x1_Glr4e-4_Dlr1e-4_5k_20210512_163926-207a1eaf.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/dcgan/dcgan_celeba-cropped_64_b128x1_300k.py
In Collection: Unsupervised Representation Learning with Deep Convolutional Generative
Adversarial Networks
Metadata:
Training Data: CELEBA
Name: dcgan_celeba-cropped_64_b128x1_300k
Results:
- Dataset: CELEBA
Metrics:
MS-SSIM: 0.2899
SWD: 8.93,10.53,50.32/23.26
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/dcgan/dcgan_celeba-cropped_64_b128x1_300kiter_20210408_161607-1f8a2277.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/dcgan/dcgan_lsun-bedroom_64x64_b128x1_5e.py
In Collection: Unsupervised Representation Learning with Deep Convolutional Generative
Adversarial Networks
Metadata:
Training Data: LSUN
Name: dcgan_lsun-bedroom_64x64_b128x1_5e
Results:
- Dataset: LSUN
Metrics:
MS-SSIM: 0.2095
SWD: 42.79, 34.55, 98.46/58.6
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/dcgan/dcgan_lsun-bedroom_64_b128x1_5e_20210408_161713-117c498b.pth
# MMGeneration config: hinge-loss GAN (GGAN-style) with the DCGAN 64x64
# architecture, trained on CelebA-Cropped.
_base_ = [
'../_base_/models/dcgan/dcgan_64x64.py',
'../_base_/datasets/unconditional_imgs_64x64.py',
'../_base_/default_runtime.py'
]
# override the base DCGAN: hinge GAN loss, single-channel discriminator
# output at spatial scale 4
model = dict(
discriminator=dict(output_scale=4, out_channels=1),
gan_loss=dict(type='GANLoss', gan_type='hinge'))
# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
samples_per_gpu=128,
train=dict(imgs_root='./data/celeba/cropped_images_aligned_png/'),
val=dict(imgs_root='./data/celeba/cropped_images_aligned_png/'))
optimizer = dict(
generator=dict(type='Adam', lr=0.001, betas=(0.5, 0.99)),
discriminator=dict(type='Adam', lr=0.001, betas=(0.5, 0.99)))
# adjust running config
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000)
]
# compute FID on 50k samples every 10k iterations during training
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='orig'))
total_iters = 100000
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
type='DynamicIterBasedRunner',
is_dynamic_ddp=False, # Note that this flag should be False.
pass_training_status=True)
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 64, 64)),
fid50k=dict(type='FID', num_images=50000, inception_pkl=None))
# MMGeneration config: hinge-loss GAN (GGAN-style) with the DCGAN 128x128
# architecture, trained on CelebA-Cropped.
_base_ = [
'../_base_/models/dcgan/dcgan_128x128.py',
'../_base_/datasets/unconditional_imgs_128x128.py',
'../_base_/default_runtime.py'
]
# override the base DCGAN: hinge GAN loss, single-channel discriminator
# output at spatial scale 4
model = dict(
discriminator=dict(output_scale=4, out_channels=1),
gan_loss=dict(type='GANLoss', gan_type='hinge'))
# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
samples_per_gpu=64,
train=dict(imgs_root='./data/celeba/cropped_images_aligned_png/'),
val=dict(imgs_root='./data/celeba/cropped_images_aligned_png/'))
optimizer = dict(
generator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)),
discriminator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)))
# adjust running config
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000)
]
# compute FID on 50k samples every 10k iterations during training
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='orig'))
total_iters = 160000
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
type='DynamicIterBasedRunner',
is_dynamic_ddp=False, # Note that this flag should be False.
pass_training_status=True)
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 128, 128)),
fid50k=dict(type='FID', num_images=50000, inception_pkl=None))
# MMGeneration config: hinge-loss GAN (GGAN-style) built from the LSGAN
# generator/discriminator at 64x64, trained on LSUN-Bedroom.
# Only dataset and runtime defaults come from `_base_`; the model is
# defined fully below.
_base_ = [
'../_base_/datasets/unconditional_imgs_64x64.py',
'../_base_/default_runtime.py'
]
model = dict(
type='StaticUnconditionalGAN',
generator=dict(type='LSGANGenerator', output_scale=64),
discriminator=dict(type='LSGANDiscriminator', input_scale=64),
gan_loss=dict(type='GANLoss', gan_type='hinge'))
# one discriminator step per generator step
train_cfg = dict(disc_steps=1)
test_cfg = None
# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
samples_per_gpu=128,
train=dict(imgs_root='data/lsun/bedroom_train'),
val=dict(imgs_root='data/lsun/bedroom_train'))
optimizer = dict(
generator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)),
discriminator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)))
# adjust running config
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000)
]
# compute FID on 50k samples every 10k iterations during training
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='orig'))
total_iters = 160000
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
type='DynamicIterBasedRunner',
is_dynamic_ddp=False, # Note that this flag should be False.
pass_training_status=True)
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 64, 64)),
fid50k=dict(type='FID', num_images=50000, inception_pkl=None))
Collections:
- Metadata:
Architecture:
- GGAN
Name: GGAN
Paper:
- https://arxiv.org/abs/1705.02894
README: configs/ggan/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/ggan/ggan_celeba-cropped_dcgan-archi_lr-1e-3_64_b128x1_12m.py
In Collection: GGAN
Metadata:
Training Data: CELEBA
Name: ggan_celeba-cropped_dcgan-archi_lr-1e-3_64_b128x1_12m
Results:
- Dataset: CELEBA
Metrics:
FID: 20.1797
MS-SSIM: 0.3318
SWD: 11.18, 12.21, 39.16/20.85
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/ggan/ggan_celeba-cropped_dcgan-archi_lr-1e-3_64_b128x1_12m.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/ggan/ggan_celeba-cropped_dcgan-archi_lr-1e-4_128_b64x1_10m.py
In Collection: GGAN
Metadata:
Training Data: CELEBA
Name: ggan_celeba-cropped_dcgan-archi_lr-1e-4_128_b64x1_10m
Results:
- Dataset: CELEBA
Metrics:
FID: 18.7647
MS-SSIM: 0.3149
SWD: 9.81, 11.29, 19.22, 47.79/22.03
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/ggan/ggan_celeba-cropped_dcgan-archi_lr-1e-4_128_b64x1_10m_20210430_143027-516423dc.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/ggan/ggan_lsun-bedroom_lsgan_archi_lr-1e-4_64_b128x1_20m.py
In Collection: GGAN
Metadata:
Training Data: LSUN
Name: ggan_lsun-bedroom_lsgan_archi_lr-1e-4_64_b128x1_20m
Results:
- Dataset: LSUN
Metrics:
FID: 85.6629
MS-SSIM: 0.0649
SWD: 9.1, 6.2, 12.27/9.19
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/ggan/ggan_lsun-bedroom_lsgan_archi_lr-1e-4_64_b128x1_20m_20210430_143114-5d99b76c.pth
# MMGeneration config: improved DDPM on CIFAR-10 at 32x32.
# Model, dataset and runtime defaults are inherited from `_base_`.
_base_ = [
    '../_base_/models/improved_ddpm/ddpm_32x32.py',
    '../_base_/datasets/cifar10_noaug.py', '../_base_/default_runtime.py'
]
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    dict(
        type='MMGenVisualizationHook',
        output_dir='training_samples',
        res_name_list=['real_imgs', 'x_0_pred', 'x_t', 'x_t_1'],
        padding=1,
        interval=1000),
    dict(
        type='ExponentialMovingAverageHook',
        # FIX: was `('denoising_ema')`, which is a plain string (missing
        # trailing comma), not a tuple. Use a 1-tuple to match the
        # `module_keys=('generator_ema', )` convention used elsewhere.
        module_keys=('denoising_ema', ),
        interval=1,
        start_iter=0,
        interp_cfg=dict(momentum=0.9999),
        priority='VERY_HIGH')
]
# Do not evaluate during training because evaluation takes too much time;
# run the `metrics` below offline instead.
evaluation = None
total_iters = 500000  # 500k
data = dict(samples_per_gpu=16)  # 8x16=128
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)
# pre-computed inception statistics used by FID
inception_pkl = './work_dirs/inception_pkl/cifar10.pkl'
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        bgr2rgb=True,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN')))
# MMGeneration config: improved DDPM on ImageNet at 64x64 with dropout 0.3.
# Model, dataset and runtime defaults are inherited from `_base_`.
_base_ = [
    '../_base_/models/improved_ddpm/ddpm_64x64.py',
    '../_base_/datasets/imagenet_noaug_64.py', '../_base_/default_runtime.py'
]
# set dropout prob as 0.3
model = dict(denoising=dict(dropout=0.3))
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    dict(
        type='MMGenVisualizationHook',
        output_dir='training_samples',
        res_name_list=['real_imgs', 'x_0_pred', 'x_t', 'x_t_1'],
        padding=1,
        interval=1000),
    dict(
        type='ExponentialMovingAverageHook',
        # FIX: was `('denoising_ema')`, which is a plain string (missing
        # trailing comma), not a tuple. Use a 1-tuple to match the
        # `module_keys=('generator_ema', )` convention used elsewhere.
        module_keys=('denoising_ema', ),
        interval=1,
        start_iter=0,
        interp_cfg=dict(momentum=0.9999),
        priority='VERY_HIGH')
]
# Do not evaluate during training because evaluation takes too much time;
# run the `metrics` below offline instead.
evaluation = None
total_iters = 1500000  # 1500k
data = dict(samples_per_gpu=16)  # 8x16=128
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)
# pre-computed inception statistics used by FID
inception_pkl = './work_dirs/inception_pkl/imagenet_64x64.pkl'
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        bgr2rgb=True,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN')))
# MMGeneration config: improved DDPM on ImageNet at 64x64 (no extra dropout).
# Model, dataset and runtime defaults are inherited from `_base_`.
_base_ = [
    '../_base_/models/improved_ddpm/ddpm_64x64.py',
    '../_base_/datasets/imagenet_noaug_64.py', '../_base_/default_runtime.py'
]
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    dict(
        type='MMGenVisualizationHook',
        output_dir='training_samples',
        res_name_list=['real_imgs', 'x_0_pred', 'x_t', 'x_t_1'],
        padding=1,
        interval=1000),
    dict(
        type='ExponentialMovingAverageHook',
        # FIX: was `('denoising_ema')`, which is a plain string (missing
        # trailing comma), not a tuple. Use a 1-tuple to match the
        # `module_keys=('generator_ema', )` convention used elsewhere.
        module_keys=('denoising_ema', ),
        interval=1,
        start_iter=0,
        interp_cfg=dict(momentum=0.9999),
        priority='VERY_HIGH')
]
# Do not evaluate during training because evaluation takes too much time;
# run the `metrics` below offline instead.
evaluation = None
total_iters = 1500000  # 1500k
data = dict(samples_per_gpu=16)  # 8x16=128
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)
# pre-computed inception statistics used by FID
inception_pkl = './work_dirs/inception_pkl/imagenet_64x64.pkl'
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        bgr2rgb=True,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN')))
Collections:
- Metadata:
Architecture:
- Improved-DDPM
Name: Improved-DDPM
Paper:
- https://arxiv.org/abs/2102.09672
README: configs/improved_ddpm/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/blob/master/configs/improved_ddpm/ddpm_cosine_hybird_timestep-4k_drop0.3_cifar10_32x32_b8x16_500k.py
In Collection: Improved-DDPM
Metadata:
Training Data: CIFAR
Name: ddpm_cosine_hybird_timestep-4k_drop0.3_cifar10_32x32_b8x16_500k
Results:
- Dataset: CIFAR
Metrics:
FID: 3.8848
Task: Denoising Diffusion Probabilistic Models
Weights: https://download.openmmlab.com/mmgen/improved_ddpm/ddpm_cosine_hybird_timestep-4k_drop0.3_cifar10_32x32_b8x16_500k_20220103_222621-2f42f476.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/improved_ddpm/ddpm_cosine_hybird_timestep-4k_imagenet1k_64x64_b8x16_1500k.py
In Collection: Improved-DDPM
Metadata:
Training Data: IMAGENET
Name: ddpm_cosine_hybird_timestep-4k_imagenet1k_64x64_b8x16_1500k
Results:
- Dataset: IMAGENET
Metrics:
FID: 13.5181
Task: Denoising Diffusion Probabilistic Models
Weights: https://download.openmmlab.com/mmgen/improved_ddpm/ddpm_cosine_hybird_timestep-4k_imagenet1k_64x64_b8x16_1500k_20220103_223919-b8f1a310.pth
- Config: https://github.com/open-mmlab/mmgeneration/blob/master/configs/improved_ddpm/ddpm_cosine_hybird_timestep-4k_drop0.3_imagenet1k_64x64_b8x16_1500k.py
In Collection: Improved-DDPM
Metadata:
Training Data: IMAGENET
Name: ddpm_cosine_hybird_timestep-4k_drop0.3_imagenet1k_64x64_b8x16_1500k
Results:
- Dataset: IMAGENET
Metrics:
FID: 13.4094
Task: Denoising Diffusion Probabilistic Models
Weights: https://download.openmmlab.com/mmgen/improved_ddpm/ddpm_cosine_hybird_timestep-4k_drop0.3_imagenet1k_64x64_b8x16_1500k_20220103_224427-7bb55975.pth
# MMGeneration config: least-squares GAN loss on the DCGAN 64x64
# architecture, trained on CelebA-Cropped with lr 1e-3.
_base_ = [
'../_base_/models/dcgan/dcgan_64x64.py',
'../_base_/datasets/unconditional_imgs_64x64.py',
'../_base_/default_runtime.py'
]
# swap the base model's GAN loss for the least-squares variant
model = dict(gan_loss=dict(type='GANLoss', gan_type='lsgan'))
# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
samples_per_gpu=128,
train=dict(imgs_root='./data/celeba-cropped/cropped_images_aligned_png/'))
optimizer = dict(
generator=dict(type='Adam', lr=0.001, betas=(0.5, 0.99)),
discriminator=dict(type='Adam', lr=0.001, betas=(0.5, 0.99)))
# adjust running config
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=10000)
]
# compute FID on 50k samples every 10k iterations during training
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='orig'))
total_iters = 100000
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
type='DynamicIterBasedRunner',
is_dynamic_ddp=False, # Note that this flag should be False.
pass_training_status=True)
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 64, 64)),
fid50k=dict(type='FID', num_images=50000, inception_pkl=None))
# MMGeneration config: least-squares GAN loss on the DCGAN 128x128
# architecture, trained on CelebA-Cropped with lr 1e-4.
_base_ = [
'../_base_/models/dcgan/dcgan_128x128.py',
'../_base_/datasets/unconditional_imgs_128x128.py',
'../_base_/default_runtime.py'
]
# lsgan loss with a single-channel discriminator output at spatial scale 4
model = dict(
discriminator=dict(output_scale=4, out_channels=1),
gan_loss=dict(type='GANLoss', gan_type='lsgan'))
# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
samples_per_gpu=64,
train=dict(imgs_root='./data/celeba-cropped/cropped_images_aligned_png/'))
optimizer = dict(
generator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)),
discriminator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)))
# adjust running config
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=10000)
]
# compute FID on 50k samples every 10k iterations during training
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='orig'))
total_iters = 160000
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
type='DynamicIterBasedRunner',
is_dynamic_ddp=False, # Note that this flag should be False.
pass_training_status=True)
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 128, 128)),
fid50k=dict(type='FID', num_images=50000, inception_pkl=None))
# MMGeneration config: least-squares GAN loss on the DCGAN 64x64
# architecture, trained on LSUN-Bedroom.
_base_ = [
'../_base_/models/dcgan/dcgan_64x64.py',
'../_base_/datasets/unconditional_imgs_64x64.py',
'../_base_/default_runtime.py'
]
# lsgan loss with a single-channel discriminator output at spatial scale 4
model = dict(
discriminator=dict(output_scale=4, out_channels=1),
gan_loss=dict(type='GANLoss', gan_type='lsgan'))
# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
samples_per_gpu=128, train=dict(imgs_root='./data/lsun/bedroom_train'))
optimizer = dict(
generator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)),
discriminator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)))
# adjust running config
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=10000)
]
# compute FID on 50k samples every 10k iterations during training
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='orig'))
total_iters = 100000
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
type='DynamicIterBasedRunner',
is_dynamic_ddp=False, # Note that this flag should be False.
pass_training_status=True)
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 64, 64)),
fid50k=dict(type='FID', num_images=50000, inception_pkl=None))
# MMGeneration config: LSGAN (native LSGAN architecture) at 128x128,
# trained on LSUN-Bedroom. Model, dataset pipeline and runtime defaults
# are inherited from `_base_`.
_base_ = [
'../_base_/models/lsgan/lsgan_128x128.py',
'../_base_/datasets/unconditional_imgs_128x128.py',
'../_base_/default_runtime.py'
]
# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
samples_per_gpu=64, train=dict(imgs_root='./data/lsun/bedroom_train'))
optimizer = dict(
generator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)),
discriminator=dict(type='Adam', lr=0.0001, betas=(0.5, 0.99)))
# adjust running config
# no LR schedule: constant learning rate
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=10000)
]
# compute FID on 50k samples every 10k iterations during training
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='orig'))
total_iters = 160000
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
type='DynamicIterBasedRunner',
is_dynamic_ddp=False, # Note that this flag should be False.
pass_training_status=True)
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 128, 128)),
fid50k=dict(type='FID', num_images=50000, inception_pkl=None))
Collections:
- Metadata:
Architecture:
- LSGAN
Name: LSGAN
Paper:
- https://openaccess.thecvf.com/content_iccv_2017/html/Mao_Least_Squares_Generative_ICCV_2017_paper.html
README: configs/lsgan/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/lsgan/lsgan_dcgan-archi_lr-1e-3_celeba-cropped_64_b128x1_12m.py
In Collection: LSGAN
Metadata:
Training Data: CELEBA
Name: lsgan_dcgan-archi_lr-1e-3_celeba-cropped_64_b128x1_12m
Results:
- Dataset: CELEBA
Metrics:
FID: 11.9258
MS-SSIM: 0.3216
SWD: 6.16, 6.83, 37.64/16.87
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/lsgan/lsgan_celeba-cropped_dcgan-archi_lr-1e-3_64_b128x1_12m_20210429_144001-92ca1d0d.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/lsgan/lsgan_dcgan-archi_lr-1e-4_lsun-bedroom_64_b128x1_12m.py
In Collection: LSGAN
Metadata:
Training Data: LSUN
Name: lsgan_dcgan-archi_lr-1e-4_lsun-bedroom_64_b128x1_12m
Results:
- Dataset: LSUN
Metrics:
FID: 30.739
MS-SSIM: 0.0671
SWD: 5.66, 9.0, 18.6/11.09
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/lsgan/lsgan_lsun-bedroom_dcgan-archi_lr-1e-4_64_b128x1_12m_20210429_144602-ec4ec6bb.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/lsgan/lsgan_dcgan-archi_lr-1e-4_celeba-cropped_128_b64x1_10m.py
In Collection: LSGAN
Metadata:
Training Data: CELEBA
Name: lsgan_dcgan-archi_lr-1e-4_celeba-cropped_128_b64x1_10m
Results:
- Dataset: CELEBA
Metrics:
FID: 38.3752
MS-SSIM: 0.3691
SWD: 21.66, 9.83, 16.06, 70.76/29.58
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/lsgan/lsgan_celeba-cropped_dcgan-archi_lr-1e-4_128_b64x1_10m_20210429_144229-01ba67dc.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/lsgan/lsgan_lsgan-archi_lr-1e-4_lsun-bedroom_128_b64x1_10m.py
In Collection: LSGAN
Metadata:
Training Data: LSUN
Name: lsgan_lsgan-archi_lr-1e-4_lsun-bedroom_128_b64x1_10m
Results:
- Dataset: LSUN
Metrics:
FID: 51.55
MS-SSIM: 0.0612
SWD: 19.52, 9.99, 7.48, 14.3/12.82
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/lsgan/lsgan_lsun-bedroom_lsgan-archi_lr-1e-4_128_b64x1_10m_20210429_155605-cf78c0a8.pth
Collections:
- Metadata:
Architecture:
- PGGAN
Name: PGGAN
Paper:
- https://arxiv.org/abs/1710.10196
README: configs/pggan/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/pggan/pggan_celeba-cropped_128_g8_12Mimgs.py
In Collection: PGGAN
Metadata:
Training Data: CELEBA
Name: pggan_celeba-cropped_128_g8_12Mimgs
Results:
- Dataset: CELEBA
Metrics:
Details: celeba-cropped
MS-SSIM: 0.3023
SWD(xx,xx,xx,xx/avg): 3.42, 4.04, 4.78, 20.38/8.15
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/pggan/pggan_celeba-cropped_128_g8_20210408_181931-85a2e72c.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/pggan/pggan_lsun-bedroom_128_g8_12Mimgs.py
In Collection: PGGAN
Metadata:
Training Data: LSUN
Name: pggan_lsun-bedroom_128_g8_12Mimgs
Results:
- Dataset: LSUN
Metrics:
Details: lsun-bedroom
MS-SSIM: 0.0602
SWD(xx,xx,xx,xx/avg): 3.5, 2.96, 2.76, 9.65/4.72
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/pggan/pggan_lsun-bedroom_128x128_g8_20210408_182033-5e59f45d.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/pggan/pggan_celeba-hq_1024_g8_12Mimg.py
In Collection: PGGAN
Metadata:
Training Data: CELEBA
Name: pggan_celeba-hq_1024_g8_12Mimg
Results:
- Dataset: CELEBA
Metrics:
Details: celeba-hq
MS-SSIM: 0.3379
SWD(xx,xx,xx,xx/avg): 8.93, 3.98, 3.07, 2.64/4.655
Task: Unconditional GANs
Weights: https://download.openmmlab.com/mmgen/pggan/pggan_celeba-hq_1024_g8_20210408_181911-f1ef51c3.pth
# MMGeneration config: PGGAN at 128x128 trained on CelebA-Cropped.
# Model, grow-scale dataset and runtime defaults are inherited from `_base_`.
_base_ = [
'../_base_/models/pggan/pggan_128x128.py',
'../_base_/datasets/grow_scale_imgs_128x128.py',
'../_base_/default_runtime.py'
]
# optimizer is set to None here; presumably configured by the base model
# config — TODO confirm
optimizer = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
data = dict(
samples_per_gpu=64,
train=dict(
imgs_roots={'128': './data/celeba-cropped/cropped_images_aligned_png'},
gpu_samples_base=4,
# note that this should be changed with total gpu number
# per-GPU batch size shrinks as the training resolution grows
gpu_samples_per_scale={
'4': 64,
'8': 32,
'16': 16,
'32': 8,
'64': 4
}))
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(type='PGGANFetchDataHook', interval=1),
# keep an exponential moving average of the generator weights
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
priority='VERY_HIGH')
]
# no LR schedule: constant learning rate
lr_config = None
total_iters = 280000
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 128, 128)))
# MMGeneration config: PGGAN at 1024x1024 trained on CelebA-HQ.
# Model, grow-scale dataset and runtime defaults are inherited from `_base_`.
_base_ = [
'../_base_/models/pggan/pggan_1024.py',
'../_base_/datasets/grow_scale_imgs_celeba-hq.py',
'../_base_/default_runtime.py'
]
# optimizer is set to None here; presumably configured by the base model
# config — TODO confirm
optimizer = None
checkpoint_config = dict(interval=5000, by_epoch=False, max_keep_ckpts=20)
data = dict(
samples_per_gpu=64,
train=dict(
gpu_samples_base=4,
# note that this should be changed with total gpu number
# per-GPU batch size shrinks as the training resolution grows
gpu_samples_per_scale={
'4': 64,
'8': 32,
'16': 16,
'32': 8,
'64': 4
},
))
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(type='PGGANFetchDataHook', interval=1),
# keep an exponential moving average of the generator weights
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
priority='VERY_HIGH')
]
# no LR schedule: constant learning rate
lr_config = None
total_iters = 280000
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 1024, 1024)))
# MMGeneration config: PGGAN at 128x128 trained on LSUN-Bedroom.
# Model, grow-scale dataset and runtime defaults are inherited from `_base_`.
_base_ = [
'../_base_/models/pggan/pggan_128x128.py',
'../_base_/datasets/grow_scale_imgs_128x128.py',
'../_base_/default_runtime.py'
]
# optimizer is set to None here; presumably configured by the base model
# config — TODO confirm
optimizer = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
data = dict(
samples_per_gpu=64,
train=dict(
imgs_roots={'128': './data/lsun/bedroom_train'},
gpu_samples_base=4,
# note that this should be changed with total gpu number
# per-GPU batch size shrinks as the training resolution grows
gpu_samples_per_scale={
'4': 64,
'8': 32,
'16': 16,
'32': 8,
'64': 4
},
))
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(type='PGGANFetchDataHook', interval=1),
# keep an exponential moving average of the generator weights
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
priority='VERY_HIGH')
]
# no LR schedule: constant learning rate
lr_config = None
total_iters = 280000
# metrics for offline evaluation
metrics = dict(
ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 128, 128)))
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment