Commit 1401de15 authored by dongchy920's avatar dongchy920
Browse files

stylegan2_mmcv

parents
Pipeline #1274 canceled with stages
# SNGAN-Projection at 128x128 on ImageNet, GAN-Studio weight init,
# inplace ReLU activations in both generator and discriminator.
_base_ = [
    '../_base_/models/sngan_proj/sngan_proj_128x128.py',
    '../_base_/datasets/imagenet_128.py', '../_base_/default_runtime.py'
]

num_classes = 1000
init_cfg = dict(type='studio')
model = dict(
    num_classes=num_classes,
    generator=dict(
        num_classes=num_classes,
        act_cfg=dict(type='ReLU', inplace=True),
        init_cfg=init_cfg),
    discriminator=dict(
        num_classes=num_classes,
        act_cfg=dict(type='ReLU', inplace=True),
        init_cfg=init_cfg))

# 5 discriminator updates per generator update.
n_disc = 5
train_cfg = dict(disc_steps=n_disc)

lr_config = None
checkpoint_config = dict(interval=50000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000)
]
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])

inception_pkl = './work_dirs/inception_pkl/imagenet.pkl'
evaluation = dict(
    type='GenerativeEvalHook',
    # evaluate every 10k iters before iter 800k, every 4k afterwards
    interval=dict(milestones=[800000], interval=[10000, 4000]),
    metrics=[
        dict(
            type='FID',
            num_images=50000,
            inception_pkl=inception_pkl,
            bgr2rgb=True,
            inception_args=dict(type='StyleGAN')),
        dict(type='IS', num_images=50000)
    ],
    best_metric=['fid', 'is'],
    sample_kwargs=dict(sample_model='orig'))

total_iters = 500000 * n_disc

# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)

metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN')),
    IS50k=dict(type='IS', num_images=50000))

optimizer = dict(
    generator=dict(type='Adam', lr=0.0002, betas=(0.0, 0.999)),
    discriminator=dict(type='Adam', lr=0.00005, betas=(0.0, 0.999)))

# train on 2 gpus
data = dict(samples_per_gpu=128)
# SNGAN-Projection at 128x128 on ImageNet, GAN-Studio weight init,
# using the activation config inherited from the model base.
_base_ = [
    '../_base_/models/sngan_proj/sngan_proj_128x128.py',
    '../_base_/datasets/imagenet_128.py', '../_base_/default_runtime.py'
]

num_classes = 1000
init_cfg = dict(type='studio')
model = dict(
    num_classes=num_classes,
    generator=dict(num_classes=num_classes, init_cfg=init_cfg),
    discriminator=dict(num_classes=num_classes, init_cfg=init_cfg))

# 5 discriminator updates per generator update.
n_disc = 5
train_cfg = dict(disc_steps=n_disc)

lr_config = None
checkpoint_config = dict(interval=50000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000)
]
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])

inception_pkl = './work_dirs/inception_pkl/imagenet.pkl'
evaluation = dict(
    type='GenerativeEvalHook',
    # evaluate every 10k iters before iter 800k, every 4k afterwards
    interval=dict(milestones=[800000], interval=[10000, 4000]),
    metrics=[
        dict(
            type='FID',
            num_images=50000,
            inception_pkl=inception_pkl,
            bgr2rgb=True,
            inception_args=dict(type='StyleGAN')),
        dict(type='IS', num_images=50000)
    ],
    best_metric=['fid', 'is'],
    sample_kwargs=dict(sample_model='orig'))

total_iters = 500000 * n_disc

# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)

metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN')),
    IS50k=dict(type='IS', num_images=50000))

optimizer = dict(
    generator=dict(type='Adam', lr=0.0002, betas=(0.0, 0.999)),
    discriminator=dict(type='Adam', lr=0.00005, betas=(0.0, 0.999)))

# train on 2 gpus
data = dict(samples_per_gpu=128)
# NOTE(review): this config inherits only the SNGAN-Projection 32x32 model
# base — no dataset or runtime bases are listed; confirm that is intentional.
_base_ = ['../_base_/models/sngan_proj/sngan_proj_32x32.py']
# follow pytorch GAN-Studio, random flip is used in the dataset
# SNGAN-Projection at 32x32 on CIFAR-10, GAN-Studio weight init,
# inplace ReLU activations in both generator and discriminator.
_base_ = [
    '../_base_/models/sngan_proj/sngan_proj_32x32.py',
    '../_base_/datasets/cifar10_nopad.py', '../_base_/default_runtime.py'
]

num_classes = 10
init_cfg = dict(type='studio')
model = dict(
    num_classes=num_classes,
    generator=dict(
        act_cfg=dict(type='ReLU', inplace=True),
        num_classes=num_classes,
        init_cfg=init_cfg),
    discriminator=dict(
        act_cfg=dict(type='ReLU', inplace=True),
        num_classes=num_classes,
        init_cfg=init_cfg))

# Number of discriminator updates per generator update.
n_disc = 5
# FIX: `n_disc` was defined but never applied — without `disc_steps` the
# runner would perform a single discriminator step per generator step,
# contradicting `total_iters = 100000 * n_disc` below. The sibling ImageNet
# configs in this file all set `train_cfg = dict(disc_steps=n_disc)`.
train_cfg = dict(disc_steps=n_disc)

lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000)
]

inception_pkl = './work_dirs/inception_pkl/cifar10.pkl'
evaluation = dict(
    type='GenerativeEvalHook',
    interval=10000,
    metrics=[
        dict(
            type='FID',
            num_images=50000,
            inception_pkl=inception_pkl,
            bgr2rgb=True,
            inception_args=dict(type='StyleGAN')),
        dict(type='IS', num_images=50000)
    ],
    best_metric=['fid', 'is'],
    sample_kwargs=dict(sample_model='orig'))

total_iters = 100000 * n_disc

# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)

metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN')),
    IS50k=dict(type='IS', num_images=50000))

optimizer = dict(
    generator=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),
    discriminator=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))

data = dict(samples_per_gpu=64)
# follow pytorch GAN-Studio, random flip is used in the dataset
# SNGAN-Projection at 32x32 on CIFAR-10, GAN-Studio weight init,
# using the activation config inherited from the model base.
_base_ = [
    '../_base_/models/sngan_proj/sngan_proj_32x32.py',
    '../_base_/datasets/cifar10_nopad.py', '../_base_/default_runtime.py'
]

num_classes = 10
init_cfg = dict(type='studio')
model = dict(
    num_classes=num_classes,
    generator=dict(num_classes=num_classes, init_cfg=init_cfg),
    discriminator=dict(num_classes=num_classes, init_cfg=init_cfg))

# Number of discriminator updates per generator update.
n_disc = 5
# FIX: `n_disc` was defined but never applied — without `disc_steps` the
# runner would perform a single discriminator step per generator step,
# contradicting `total_iters = 100000 * n_disc` below. The sibling ImageNet
# configs in this file all set `train_cfg = dict(disc_steps=n_disc)`.
train_cfg = dict(disc_steps=n_disc)

lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000)
]

inception_pkl = './work_dirs/inception_pkl/cifar10.pkl'
evaluation = dict(
    type='GenerativeEvalHook',
    interval=10000,
    metrics=[
        dict(
            type='FID',
            num_images=50000,
            inception_pkl=inception_pkl,
            bgr2rgb=True,
            inception_args=dict(type='StyleGAN')),
        dict(type='IS', num_images=50000)
    ],
    best_metric=['fid', 'is'],
    sample_kwargs=dict(sample_model='orig'))

total_iters = 100000 * n_disc

# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)

metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN')),
    IS50k=dict(type='IS', num_images=50000))

optimizer = dict(
    generator=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),
    discriminator=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))

data = dict(samples_per_gpu=64)
# OpenMMLab model-zoo metafile for StyleGANv1.
# FIX: the scraped copy lost all YAML indentation, leaving an invalid
# document; structure restored to the standard metafile schema.
Collections:
- Metadata:
    Architecture:
    - StyleGANv1
  Name: StyleGANv1
  Paper:
  - https://openaccess.thecvf.com/content_CVPR_2019/html/Karras_A_Style-Based_Generator_Architecture_for_Generative_Adversarial_Networks_CVPR_2019_paper.html
  README: configs/styleganv1/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv1/styleganv1_ffhq_256_g8_25Mimg.py
  In Collection: StyleGANv1
  Metadata:
    Training Data: FFHQ
  Name: styleganv1_ffhq_256_g8_25Mimg
  Results:
  - Dataset: FFHQ
    Metrics:
      FID50k: 6.09
      P&R50k_full: 70.228/27.050
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/styleganv1/styleganv1_ffhq_256_g8_25Mimg_20210407_161748-0094da86.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv1/styleganv1_ffhq_1024_g8_25Mimg.py
  In Collection: StyleGANv1
  Metadata:
    Training Data: FFHQ
  Name: styleganv1_ffhq_1024_g8_25Mimg
  Results:
  - Dataset: FFHQ
    Metrics:
      FID50k: 4.056
      P&R50k_full: 70.302/36.869
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/styleganv1/styleganv1_ffhq_1024_g8_25Mimg_20210407_161627-850a7234.pth
# StyleGANv1 on FFHQ at 1024x1024 with progressive growing.
_base_ = [
    '../_base_/models/stylegan/styleganv1_base.py',
    '../_base_/datasets/grow_scale_imgs_ffhq_styleganv1.py',
    '../_base_/default_runtime.py',
]

model = dict(generator=dict(out_size=1024), discriminator=dict(in_size=1024))

# Thousands of images shown at each resolution of the growing schedule;
# the final 1024 stage runs for the bulk of training.
train_cfg = dict(
    nkimgs_per_scale={
        '8': 1200,
        '16': 1200,
        '32': 1200,
        '64': 1200,
        '128': 1200,
        '256': 1200,
        '512': 1200,
        '1024': 166000
    })

checkpoint_config = dict(interval=5000, by_epoch=False, max_keep_ckpts=20)
lr_config = None

ema_half_life = 10.  # G_smoothing_kimg
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    dict(type='PGGANFetchDataHook', interval=1),
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interval=1,
        interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
        priority='VERY_HIGH')
]

total_iters = 670000

metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl='work_dirs/inception_pkl/ffhq-1024-50k-rgb.pkl',
        bgr2rgb=True),
    pr50k3=dict(type='PR', num_images=50000, k=3),
    ppl_wend=dict(type='PPL', space='W', sampling='end', num_images=50000))
# StyleGANv1 on FFHQ at 256x256 with progressive growing.
_base_ = [
    '../_base_/models/stylegan/styleganv1_base.py',
    '../_base_/datasets/grow_scale_imgs_ffhq_styleganv1.py',
    '../_base_/default_runtime.py',
]

model = dict(generator=dict(out_size=256), discriminator=dict(in_size=256))

# Thousands of images shown at each resolution of the growing schedule;
# the final 256 stage runs for the bulk of training.
train_cfg = dict(nkimgs_per_scale={
    '8': 1200,
    '16': 1200,
    '32': 1200,
    '64': 1200,
    '128': 1200,
    '256': 190000
})

checkpoint_config = dict(interval=5000, by_epoch=False, max_keep_ckpts=20)
lr_config = None

ema_half_life = 10.  # G_smoothing_kimg
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    dict(type='PGGANFetchDataHook', interval=1),
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interval=1,
        interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
        priority='VERY_HIGH')
]

total_iters = 670000

metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl='work_dirs/inception_pkl/ffhq-256-50k-rgb.pkl',
        bgr2rgb=True),
    pr50k3=dict(type='PR', num_images=50000, k=3),
    ppl_wend=dict(type='PPL', space='W', sampling='end', num_images=50000))
# OpenMMLab model-zoo metafile for StyleGANv2.
# FIX: the scraped copy lost all YAML indentation, leaving an invalid
# document; structure restored to the standard metafile schema.
Collections:
- Metadata:
    Architecture:
    - StyleGANv2
  Name: StyleGANv2
  Paper:
  - https://openaccess.thecvf.com/content_CVPR_2020/html/Karras_Analyzing_and_Improving_the_Image_Quality_of_StyleGAN_CVPR_2020_paper.html
  README: configs/styleganv2/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_ffhq_1024_b4x8.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_ffhq_1024_b4x8
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: official weight
      FID50k: 2.8134
      P&R50k: 62.856/49.400
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-ffhq-config-f-official_20210327_171224-bce9310c.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_lsun-car_384x512_b4x8.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: LSUN
  Name: stylegan2_c2_lsun-car_384x512_b4x8
  Results:
  - Dataset: LSUN
    Metrics:
      Comment: official weight
      FID50k: 5.4316
      P&R50k: 65.986/48.190
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-car-config-f-official_20210327_172340-8cfe053c.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_lsun-horse_256_b4x8_800k.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: LSUN
  Name: stylegan2_c2_lsun-horse_256_b4x8_800k
  Results:
  - Dataset: LSUN
    Metrics:
      Comment: official weight
      FID50k: '-'
      P&R50k: '-'
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-horse-config-f-official_20210327_173203-ef3e69ca.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_lsun-church_256_b4x8_800k.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: LSUN
  Name: stylegan2_c2_lsun-church_256_b4x8_800k
  Results:
  - Dataset: LSUN
    Metrics:
      Comment: official weight
      FID50k: '-'
      P&R50k: '-'
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-church-config-f-official_20210327_172657-1d42b7d1.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_lsun-cat_256_b4x8_800k.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: LSUN
  Name: stylegan2_c2_lsun-cat_256_b4x8_800k
  Results:
  - Dataset: LSUN
    Metrics:
      Comment: official weight
      FID50k: '-'
      P&R50k: '-'
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-cat-config-f-official_20210327_172444-15bc485b.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_ffhq_256_b4x8_800k.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_ffhq_256_b4x8_800k
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: our training
      FID50k: 3.992
      P&R50k: 69.012/40.417
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/stylegan2_c2_ffhq_256_b4x8_20210407_160709-7890ae1f.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_ffhq_1024_b4x8.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_ffhq_1024_b4x8
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: our training
      FID50k: 2.8185
      P&R50k: 68.236/49.583
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/stylegan2_c2_ffhq_1024_b4x8_20210407_150045-618c9024.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_lsun-car_384x512_b4x8.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: LSUN
  Name: stylegan2_c2_lsun-car_384x512_b4x8
  Results:
  - Dataset: LSUN
    Metrics:
      Comment: our training
      FID50k: 2.4116
      P&R50k: 66.760/50.576
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/stylegan2_c2_lsun-car_384x512_b4x8_1800k_20210424_160929-fc9072ca.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_ffhq_256_b4x8_800k.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_ffhq_256_b4x8_800k
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: baseline
      FID50k: 3.992
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/stylegan2_c2_ffhq_256_b4x8_20210407_160709-7890ae1f.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_fp16_partial-GD_PL-no-scaler_ffhq_256_b4x8_800k.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_fp16_partial-GD_PL-no-scaler_ffhq_256_b4x8_800k
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: partial layers in fp16
      FID50k: 4.331
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/stylegan2_c2_fp16_partial-GD_PL-no-scaler_ffhq_256_b4x8_800k_20210508_114854-dacbe4c9.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_fp16-globalG-partialD_PL-R1-no-scaler_ffhq_256_b4x8_800k.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_fp16-globalG-partialD_PL-R1-no-scaler_ffhq_256_b4x8_800k
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: the whole G in fp16
      FID50k: 4.362
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/stylegan2_c2_fp16-globalG-partialD_PL-R1-no-scaler_ffhq_256_b4x8_800k_20210508_114930-ef8270d4.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_apex_fp16_PL-R1-no-scaler_ffhq_256_b4x8_800k.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_apex_fp16_PL-R1-no-scaler_ffhq_256_b4x8_800k
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: the whole G&D in fp16 + two loss scaler
      FID50k: 4.614
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/stylegan2_c2_apex_fp16_PL-R1-no-scaler_ffhq_256_b4x8_800k_20210508_114701-c2bb8afd.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_ffhq_1024_b4x8.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_ffhq_1024_b4x8
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: official weight
      FID Version: Tero's StyleGAN
      FID50k: 2.8732
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-ffhq-config-f-official_20210327_171224-bce9310c.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_ffhq_1024_b4x8.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_ffhq_1024_b4x8
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: our training
      FID Version: Tero's StyleGAN
      FID50k: 2.9413
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/stylegan2_c2_ffhq_1024_b4x8_20210407_150045-618c9024.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_ffhq_1024_b4x8.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_ffhq_1024_b4x8
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: official weight
      FID Version: Our PyTorch
      FID50k: 2.8134
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-ffhq-config-f-official_20210327_171224-bce9310c.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv2/stylegan2_c2_ffhq_1024_b4x8.py
  In Collection: StyleGANv2
  Metadata:
    Training Data: FFHQ
  Name: stylegan2_c2_ffhq_1024_b4x8
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: our training
      FID Version: Our PyTorch
      FID50k: 2.8185
    Task: Unconditional GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan2/stylegan2_c2_ffhq_1024_b4x8_20210407_150045-618c9024.pth
"""Config for the `config-f` setting in StyleGAN2."""
_base_ = ['./stylegan2_c2_ffhq_256_b4x8_800k.py']
model = dict(
disc_auxiliary_loss=dict(use_apex_amp=False),
gen_auxiliary_loss=dict(use_apex_amp=False),
)
total_iters = 800002
apex_amp = dict(mode='gan', init_args=dict(opt_level='O1', num_losses=2))
resume_from = None
"""Config for the `config-f` setting in StyleGAN2."""
_base_ = ['./stylegan2_c2_ffhq_256_b4x8_800k.py']
model = dict(
generator=dict(out_size=256),
discriminator=dict(in_size=256, convert_input_fp32=False),
# disc_auxiliary_loss=dict(use_apex_amp=True),
# gen_auxiliary_loss=dict(use_apex_amp=True),
)
dataset_type = 'QuickTestImageDataset'
data = dict(
samples_per_gpu=2,
train=dict(type=dataset_type, size=(256, 256)),
val=dict(type=dataset_type, size=(256, 256)))
log_config = dict(interval=1)
total_iters = 800002
apex_amp = dict(
mode='gan', init_args=dict(opt_level='O1', num_losses=2, loss_scale=512.))
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='ema'))
"""Config for the `config-f` setting in StyleGAN2."""
_base_ = [
'../_base_/datasets/ffhq_flip.py',
'../_base_/models/stylegan/stylegan2_base.py',
'../_base_/default_runtime.py'
]
ema_half_life = 10. # G_smoothing_kimg
model = dict(generator=dict(out_size=1024), discriminator=dict(in_size=1024))
data = dict(
samples_per_gpu=2,
train=dict(dataset=dict(imgs_root='./data/images')),
val=dict(imgs_root='./data/images'))
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
priority='VERY_HIGH')
]
metrics = dict(
fid50k=dict(
type='FID',
num_images=50000,
inception_pkl='work_dirs/inception_pkl/ffhq-1024-50k-rgb.pkl',
bgr2rgb=True),
pr50k3=dict(type='PR', num_images=50000, k=3),
ppl_wend=dict(type='PPL', space='W', sampling='end', num_images=50000))
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID',
num_images=50000,
inception_pkl='work_dirs/inception_pkl/ffhq-1024-50k-rgb.pkl',
bgr2rgb=True),
sample_kwargs=dict(sample_model='ema'))
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=30)
lr_config = None
total_iters = 800002
"""Config for the `config-f` setting in StyleGAN2."""
_base_ = [
'../_base_/datasets/ffhq_flip.py',
'../_base_/models/stylegan/stylegan2_base.py',
'../_base_/default_runtime.py'
]
model = dict(generator=dict(out_size=256), discriminator=dict(in_size=256))
data = dict(
samples_per_gpu=4,
train=dict(dataset=dict(imgs_root='./data/ffhq/ffhq_imgs/ffhq_256')),
val=dict(imgs_root='./data/ffhq/ffhq_imgs/ffhq_256'))
ema_half_life = 10. # G_smoothing_kimg
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
priority='VERY_HIGH')
]
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=30)
lr_config = None
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
total_iters = 800002
metrics = dict(
fid50k=dict(
type='FID',
num_images=50000,
inception_pkl='work_dirs/inception_pkl/ffhq-256-50k-rgb.pkl',
bgr2rgb=True),
pr50k3=dict(type='PR', num_images=50000, k=3),
ppl_wend=dict(type='PPL', space='W', sampling='end', num_images=50000))
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID',
num_images=50000,
inception_pkl='work_dirs/inception_pkl/ffhq-256-50k-rgb.pkl',
bgr2rgb=True),
sample_kwargs=dict(sample_model='ema'))
"""Config for the `config-f` setting in StyleGAN2."""
_base_ = ['./stylegan2_c2_ffhq_256_b4x8_800k.py']
model = dict(
generator=dict(out_size=256, fp16_enabled=True),
discriminator=dict(in_size=256, fp16_enabled=False, num_fp16_scales=4),
)
total_iters = 800000
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
fp16_loss_scaler=dict(init_scale=512),
is_dynamic_ddp= # noqa
False, # Note that this flag should be False to use DDP wrapper.
)
"""Config for the `config-f` setting in StyleGAN2."""
_base_ = ['./stylegan2_c2_ffhq_256_b4x8_800k.py']
model = dict(
generator=dict(out_size=256, fp16_enabled=True),
discriminator=dict(in_size=256, fp16_enabled=True),
disc_auxiliary_loss=dict(data_info=dict(loss_scaler='loss_scaler')),
# gen_auxiliary_loss=dict(data_info=dict(loss_scaler='loss_scaler')),
)
dataset_type = 'QuickTestImageDataset'
data = dict(
samples_per_gpu=2,
train=dict(type=dataset_type, size=(256, 256)),
val=dict(type=dataset_type, size=(256, 256)))
log_config = dict(interval=1)
total_iters = 800002
runner = dict(fp16_loss_scaler=dict(init_scale=512))
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='ema'))
"""Config for the `config-f` setting in StyleGAN2."""
_base_ = ['./stylegan2_c2_ffhq_256_b4x8_800k.py']
model = dict(
generator=dict(out_size=256, num_fp16_scales=4),
discriminator=dict(in_size=256, num_fp16_scales=4),
disc_auxiliary_loss=dict(data_info=dict(loss_scaler='loss_scaler')),
# gen_auxiliary_loss=dict(data_info=dict(loss_scaler='loss_scaler')),
)
total_iters = 800002
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
fp16_loss_scaler=dict(init_scale=512),
is_dynamic_ddp= # noqa
False, # Note that this flag should be False to use DDP wrapper.
)
"""Config for the `config-f` setting in StyleGAN2."""
_base_ = ['./stylegan2_c2_ffhq_256_b4x8_800k.py']
model = dict(
generator=dict(out_size=256, num_fp16_scales=4),
discriminator=dict(in_size=256, num_fp16_scales=4),
disc_auxiliary_loss=dict(data_info=dict(loss_scaler='loss_scaler')),
# gen_auxiliary_loss=dict(data_info=dict(loss_scaler='loss_scaler')),
)
dataset_type = 'QuickTestImageDataset'
data = dict(
samples_per_gpu=2,
train=dict(type=dataset_type, size=(256, 256)),
val=dict(type=dataset_type, size=(256, 256)))
log_config = dict(interval=1)
total_iters = 800002
runner = dict(fp16_loss_scaler=dict(init_scale=512))
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
sample_kwargs=dict(sample_model='ema'))
# StyleGANv2 `config-f` on LSUN-car, images padded to 512.
_base_ = [
    '../_base_/datasets/lsun-car_pad_512.py',
    '../_base_/models/stylegan/stylegan2_base.py',
    '../_base_/default_runtime.py'
]

model = dict(generator=dict(out_size=512), discriminator=dict(in_size=512))
data = dict(
    samples_per_gpu=4,
    train=dict(dataset=dict(imgs_root='./data/lsun/images/car')),
    val=dict(imgs_root='./data/lsun/images/car'))

ema_half_life = 10.  # G_smoothing_kimg
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interval=1,
        interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
        priority='VERY_HIGH')
]

checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=40)
lr_config = None
total_iters = 1800002

# NOTE(review): the offline `metrics` FID entry uses inception_pkl=None while
# the `evaluation` hook below points at the lsun-car pkl — confirm the
# mismatch is intentional.
metrics = dict(
    fid50k=dict(
        type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
    pr50k3=dict(type='PR', num_images=50000, k=3),
    ppl_wend=dict(type='PPL', space='W', sampling='end', num_images=50000))

evaluation = dict(
    type='GenerativeEvalHook',
    interval=10000,
    metrics=dict(
        type='FID',
        num_images=50000,
        inception_pkl='work_dirs/inception_pkl/lsun-car-512-50k-rgb.pkl',
        bgr2rgb=True),
    sample_kwargs=dict(sample_model='ema'))
"""Note that this config is just for testing."""
_base_ = [
'../_base_/datasets/lsun_stylegan.py',
'../_base_/models/stylegan/stylegan2_base.py',
'../_base_/default_runtime.py'
]
model = dict(generator=dict(out_size=256), discriminator=dict(in_size=256))
data = dict(
samples_per_gpu=4, train=dict(dataset=dict(imgs_root='./data/lsun-cat')))
ema_half_life = 10. # G_smoothing_kimg
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
priority='VERY_HIGH')
]
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=30)
lr_config = None
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
total_iters = 800002 # need to modify
metrics = dict(
fid50k=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
pr50k3=dict(type='PR', num_images=50000, k=3),
ppl_wend=dict(type='PPL', space='W', sampling='end', num_images=50000))
"""Note that this config is just for testing."""
_base_ = [
'../_base_/datasets/lsun_stylegan.py',
'../_base_/models/stylegan/stylegan2_base.py',
'../_base_/default_runtime.py'
]
model = dict(generator=dict(out_size=256), discriminator=dict(in_size=256))
data = dict(
samples_per_gpu=4,
train=dict(dataset=dict(imgs_root='./data/lsun-church')))
ema_half_life = 10. # G_smoothing_kimg
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
priority='VERY_HIGH')
]
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=30)
lr_config = None
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
total_iters = 800002 # need to modify
metrics = dict(
fid50k=dict(
type='FID', num_images=50000, inception_pkl=None, bgr2rgb=True),
pr50k3=dict(type='PR', num_images=50000, k=3),
ppl_wend=dict(type='PPL', space='W', sampling='end', num_images=50000))
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment