Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
stylegan2_mmcv
Commits
1401de15
Commit
1401de15
authored
Jun 28, 2024
by
dongchy920
Browse files
stylegan2_mmcv
parents
Pipeline
#1274
canceled with stages
Changes
463
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
1847 additions
and
0 deletions
+1847
-0
build/lib/mmgen/.mim/configs/_base_/models/wgangp/wgangp_base.py
...ib/mmgen/.mim/configs/_base_/models/wgangp/wgangp_base.py
+35
-0
build/lib/mmgen/.mim/configs/ada/metafile.yml
build/lib/mmgen/.mim/configs/ada/metafile.yml
+22
-0
build/lib/mmgen/.mim/configs/ada/stylegan3_r_ada_fp16_gamma3.3_metfaces_1024_b4x8.py
...s/ada/stylegan3_r_ada_fp16_gamma3.3_metfaces_1024_b4x8.py
+99
-0
build/lib/mmgen/.mim/configs/ada/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8.py
...s/ada/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8.py
+96
-0
build/lib/mmgen/.mim/configs/biggan/biggan-deep_128x128_cvt_hugging-face_rgb.py
...onfigs/biggan/biggan-deep_128x128_cvt_hugging-face_rgb.py
+35
-0
build/lib/mmgen/.mim/configs/biggan/biggan-deep_256x256_cvt_hugging-face_rgb.py
...onfigs/biggan/biggan-deep_256x256_cvt_hugging-face_rgb.py
+35
-0
build/lib/mmgen/.mim/configs/biggan/biggan-deep_512x512_cvt_hugging-face_rgb.py
...onfigs/biggan/biggan-deep_512x512_cvt_hugging-face_rgb.py
+35
-0
build/lib/mmgen/.mim/configs/biggan/biggan_128x128_cvt_BigGAN-PyTorch_rgb.py
...m/configs/biggan/biggan_128x128_cvt_BigGAN-PyTorch_rgb.py
+33
-0
build/lib/mmgen/.mim/configs/biggan/biggan_ajbrock-sn_imagenet1k_128x128_b32x8_1500k.py
...iggan/biggan_ajbrock-sn_imagenet1k_128x128_b32x8_1500k.py
+62
-0
build/lib/mmgen/.mim/configs/biggan/biggan_cifar10_32x32_b25x2_500k.py
...en/.mim/configs/biggan/biggan_cifar10_32x32_b25x2_500k.py
+60
-0
build/lib/mmgen/.mim/configs/biggan/biggan_torch-sn_imagenet1k_128x128_b32x8_1500k.py
.../biggan/biggan_torch-sn_imagenet1k_128x128_b32x8_1500k.py
+65
-0
build/lib/mmgen/.mim/configs/biggan/metafile.yml
build/lib/mmgen/.mim/configs/biggan/metafile.yml
+93
-0
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_facades_b1x1_80k.py
...cyclegan/cyclegan_lsgan_id0_resnet_in_facades_b1x1_80k.py
+161
-0
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_horse2zebra_b1x1_270k.py
...gan/cyclegan_lsgan_id0_resnet_in_horse2zebra_b1x1_270k.py
+158
-0
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_summer2winter_b1x1_250k.py
...n/cyclegan_lsgan_id0_resnet_in_summer2winter_b1x1_250k.py
+159
-0
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_resnet_in_facades_b1x1_80k.py
...igs/cyclegan/cyclegan_lsgan_resnet_in_facades_b1x1_80k.py
+176
-0
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_resnet_in_horse2zebra_b1x1_270k.py
...yclegan/cyclegan_lsgan_resnet_in_horse2zebra_b1x1_270k.py
+172
-0
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_resnet_in_summer2winter_b1x1_250k.py
...legan/cyclegan_lsgan_resnet_in_summer2winter_b1x1_250k.py
+174
-0
build/lib/mmgen/.mim/configs/cyclegan/metafile.yml
build/lib/mmgen/.mim/configs/cyclegan/metafile.yml
+141
-0
build/lib/mmgen/.mim/configs/dcgan/dcgan_celeba-cropped_64_b128x1_300k.py
....mim/configs/dcgan/dcgan_celeba-cropped_64_b128x1_300k.py
+36
-0
No files found.
Too many changes to show.
To preserve performance only
463 of 463+
files are displayed.
Plain diff
Email patch
build/lib/mmgen/.mim/configs/_base_/models/wgangp/wgangp_base.py
0 → 100644
View file @
1401de15
# Base config for a WGAN-GP model (mmgen/MMCV config-dict style).
# Generator maps 128-d noise to 128x128 images; the discriminator uses
# GroupNorm conv blocks and a gradient-penalty auxiliary loss (weight 10).
model = {
    'type': 'StaticUnconditionalGAN',
    'generator': {
        'type': 'WGANGPGenerator',
        'noise_size': 128,
        'out_scale': 128,
    },
    'discriminator': {
        'type': 'WGANGPDiscriminator',
        'in_channel': 3,
        'in_scale': 128,
        # Conv block template: 3x3 stride-1 convs, conv -> GroupNorm -> LeakyReLU.
        'conv_module_cfg': {
            'conv_cfg': None,
            'kernel_size': 3,
            'stride': 1,
            'padding': 1,
            'bias': True,
            'act_cfg': {'type': 'LeakyReLU', 'negative_slope': 0.2},
            'norm_cfg': {'type': 'GN'},
            'order': ('conv', 'norm', 'act'),
        },
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'wgan'},
    # Gradient penalty on interpolates between real and fake batches.
    'disc_auxiliary_loss': [
        {
            'type': 'GradientPenaltyLoss',
            'loss_weight': 10,
            'norm_mode': 'HWC',
            'data_info': {
                'discriminator': 'disc',
                'real_data': 'real_imgs',
                'fake_data': 'fake_imgs',
            },
        },
    ],
}

# Five discriminator updates per generator update (standard WGAN-GP schedule).
train_cfg = {'disc_steps': 5}
test_cfg = None

# Identical Adam settings for both networks.
optimizer = {
    'generator': {'type': 'Adam', 'lr': 0.0001, 'betas': (0.5, 0.9)},
    'discriminator': {'type': 'Adam', 'lr': 0.0001, 'betas': (0.5, 0.9)},
}
build/lib/mmgen/.mim/configs/ada/metafile.yml
0 → 100644
View file @
1401de15
# OpenMMLab model-index metafile for the ADA (adaptive discriminator
# augmentation) configs shipped with mmgen.
Collections:
- Metadata:
    Architecture:
    - ADA
  Name: ADA
  # ADA paper: "Training Generative Adversarial Networks with Limited Data".
  Paper:
  - https://arxiv.org/pdf/2006.06676.pdf
  README: configs/ada/README.md
Models:
  # StyleGAN3-T + ADA fine-tuned on MetFaces at 1024x1024.
  # NOTE(review): Config points at configs/styleganv3/ while this metafile
  # lives under configs/ada/ — verify the link target exists upstream.
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv3/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8.py
  In Collection: ADA
  Metadata:
    Training Data: Others
  Name: stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8
  Results:
  - Dataset: Others
    Metrics:
      FID50k: 15.09
      Iter: 130000.0
      # Placeholder markdown link text as published in the metafile.
      Log: '[log]'
    Task: Tricks for GANs
  Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8_best_fid_iter_130000_20220401_115101-f2ef498e.pth
build/lib/mmgen/.mim/configs/ada/stylegan3_r_ada_fp16_gamma3.3_metfaces_1024_b4x8.py
0 → 100644
View file @
1401de15
# StyleGAN3-R + ADA config: fine-tune the official FFHQ 1024x1024 checkpoint
# on MetFaces (batch 4 per GPU, 8 GPUs per the filename's "b4x8").
_base_ = [
    '../_base_/models/stylegan/stylegan3_base.py',
    '../_base_/datasets/ffhq_flip.py', '../_base_/default_runtime.py'
]

# Synthesis network overrides for the "R" variant: wider channels, 1x1
# kernels and radial filters.
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 65536,
    'channel_max': 1024,
    'magnitude_ema_beta': 0.999,
    'conv_kernel': 1,
    'use_radial_filters': True
}
r1_gamma = 3.3  # R1 regularization weight, set by user
d_reg_interval = 16  # apply D regularization every 16 iterations (lazy reg)

# Start from the converted official StyleGAN3-R FFHQ weights.
load_from = 'https://download.openmmlab.com/mmgen/stylegan3/stylegan3_r_ffhq_1024_b4x8_cvt_official_rgb_20220329_234933-ac0500a1.pth'  # noqa

# ADA settings: enable every augmentation category with probability
# multiplier 1 (the full "bgc"-style pipeline).
aug_kwargs = {
    'xflip': 1,
    'rotate90': 1,
    'xint': 1,
    'scale': 1,
    'rotate': 1,
    'aniso': 1,
    'xfrac': 1,
    'brightness': 1,
    'contrast': 1,
    'lumaflip': 1,
    'hue': 1,
    'saturation': 1
}

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        out_size=1024,
        img_channels=3,
        rgb2bgr=True,  # checkpoint stores RGB; convert for BGR pipelines
        synthesis_cfg=synthesis_cfg),
    discriminator=dict(
        type='ADAStyleGAN2Discriminator',
        in_size=1024,
        input_bgr2rgb=True,
        # Adaptive augmentation; ada_kimg controls the adjustment speed.
        data_aug=dict(type='ADAAug', aug_pipeline=aug_kwargs, ada_kimg=100)),
    gan_loss=dict(type='GANLoss', gan_type='wgan-logistic-ns'),
    # Lazy-regularization scaling: gamma/2 * interval compensates for
    # applying R1 only every d_reg_interval steps.
    disc_auxiliary_loss=dict(loss_weight=r1_gamma / 2.0 * d_reg_interval))

imgs_root = 'data/metfaces/images/'
data = dict(
    samples_per_gpu=4,
    train=dict(dataset=dict(imgs_root=imgs_root)),
    val=dict(imgs_root=imgs_root))

ema_half_life = 10.  # G_smoothing_kimg
ema_kimg = 10
ema_nimg = ema_kimg * 1000
# Per-iteration EMA momentum; the 32 presumably reflects the global batch
# size (4 samples/GPU x 8 GPUs) — confirm against launcher settings.
ema_beta = 0.5**(32 / max(ema_nimg, 1e-8))

custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    # Maintain generator_ema with the momentum computed above.
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interp_mode='lerp',
        interp_cfg=dict(momentum=ema_beta),
        interval=1,
        start_iter=0,
        priority='VERY_HIGH')
]

# Pre-computed inception statistics for FID; path must exist locally.
inception_pkl = 'work_dirs/inception_pkl/metface_1024x1024_noflip.pkl'
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True))

evaluation = dict(
    type='GenerativeEvalHook',
    # Evaluate every 10k iters until 100k, then every 5k.
    interval=dict(milestones=[100000], interval=[10000, 5000]),
    metrics=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True),
    sample_kwargs=dict(sample_model='ema'))

lr_config = None
total_iters = 160000
build/lib/mmgen/.mim/configs/ada/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8.py
0 → 100644
View file @
1401de15
# StyleGAN3-T + ADA config: fine-tune the official FFHQ 1024x1024 checkpoint
# on MetFaces (batch 4 per GPU, 8 GPUs per the filename's "b4x8").
_base_ = [
    '../_base_/models/stylegan/stylegan3_base.py',
    '../_base_/datasets/ffhq_flip.py', '../_base_/default_runtime.py'
]

# Synthesis network overrides for the "T" variant (narrower than the R
# config in this directory; no radial filters).
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 32768,
    'channel_max': 512,
    'magnitude_ema_beta': 0.999
}
r1_gamma = 6.6  # R1 regularization weight, set by user
d_reg_interval = 16  # apply D regularization every 16 iterations (lazy reg)

# Start from the converted official StyleGAN3-T FFHQ weights.
load_from = 'https://download.openmmlab.com/mmgen/stylegan3/stylegan3_t_ffhq_1024_b4x8_cvt_official_rgb_20220329_235113-db6c6580.pth'  # noqa

# ADA settings: enable every augmentation category with probability
# multiplier 1.
aug_kwargs = {
    'xflip': 1,
    'rotate90': 1,
    'xint': 1,
    'scale': 1,
    'rotate': 1,
    'aniso': 1,
    'xfrac': 1,
    'brightness': 1,
    'contrast': 1,
    'lumaflip': 1,
    'hue': 1,
    'saturation': 1
}

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        out_size=1024,
        img_channels=3,
        rgb2bgr=True,  # checkpoint stores RGB; convert for BGR pipelines
        synthesis_cfg=synthesis_cfg),
    discriminator=dict(
        type='ADAStyleGAN2Discriminator',
        in_size=1024,
        input_bgr2rgb=True,
        # Adaptive augmentation; ada_kimg controls the adjustment speed.
        data_aug=dict(type='ADAAug', aug_pipeline=aug_kwargs, ada_kimg=100)),
    gan_loss=dict(type='GANLoss', gan_type='wgan-logistic-ns'),
    # Lazy-regularization scaling: gamma/2 * interval compensates for
    # applying R1 only every d_reg_interval steps.
    disc_auxiliary_loss=dict(loss_weight=r1_gamma / 2.0 * d_reg_interval))

imgs_root = 'data/metfaces/images/'
data = dict(
    samples_per_gpu=4,
    train=dict(dataset=dict(imgs_root=imgs_root)),
    val=dict(imgs_root=imgs_root))

ema_half_life = 10.  # G_smoothing_kimg
ema_kimg = 10
ema_nimg = ema_kimg * 1000
# Per-iteration EMA momentum; the 32 presumably reflects the global batch
# size (4 samples/GPU x 8 GPUs) — confirm against launcher settings.
ema_beta = 0.5**(32 / max(ema_nimg, 1e-8))

custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    # Maintain generator_ema with the momentum computed above.
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interp_mode='lerp',
        interp_cfg=dict(momentum=ema_beta),
        interval=1,
        start_iter=0,
        priority='VERY_HIGH')
]

# Pre-computed inception statistics for FID; path must exist locally.
inception_pkl = 'work_dirs/inception_pkl/metface_1024x1024_noflip.pkl'
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True))

evaluation = dict(
    type='GenerativeEvalHook',
    # Evaluate every 10k iters until 80k, then every 5k.
    interval=dict(milestones=[80000], interval=[10000, 5000]),
    metrics=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True),
    sample_kwargs=dict(sample_model='ema'))

lr_config = None
total_iters = 160000
build/lib/mmgen/.mim/configs/biggan/biggan-deep_128x128_cvt_hugging-face_rgb.py
0 → 100644
View file @
1401de15
# BigGAN-Deep 128x128 ImageNet model converted from the hugging-face
# release; weights are stored in RGB order (hence rgb2bgr/with torch-style
# spectral norm). 'BasiccGAN' is the mmgen registry name for the basic
# conditional GAN.
model = {
    'type': 'BasiccGAN',
    'generator': {
        'type': 'BigGANDeepGenerator',
        'output_scale': 128,
        'noise_size': 128,
        'num_classes': 1000,
        'base_channels': 128,
        'shared_dim': 128,
        'with_shared_embedding': True,
        'sn_eps': 1e-6,
        'sn_style': 'torch',
        'init_type': 'ortho',
        'act_cfg': {'type': 'ReLU', 'inplace': True},
        'concat_noise': True,
        'auto_sync_bn': False,
        'rgb2bgr': True,
    },
    'discriminator': {
        'type': 'BigGANDeepDiscriminator',
        'input_scale': 128,
        'num_classes': 1000,
        'base_channels': 128,
        'sn_eps': 1e-6,
        'sn_style': 'torch',
        'init_type': 'ortho',
        'act_cfg': {'type': 'ReLU', 'inplace': True},
        'with_spectral_norm': True,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'hinge'},
}

# 8 D steps per G step with 8-step gradient accumulation; keep an EMA copy
# of the generator for sampling.
train_cfg = {
    'disc_steps': 8,
    'gen_steps': 1,
    'batch_accumulation_steps': 8,
    'use_ema': True,
}
test_cfg = None

# BigGAN two-timescale Adam: D learns 4x faster than G.
optimizer = {
    'generator': {
        'type': 'Adam', 'lr': 0.0001, 'betas': (0.0, 0.999), 'eps': 1e-6,
    },
    'discriminator': {
        'type': 'Adam', 'lr': 0.0004, 'betas': (0.0, 0.999), 'eps': 1e-6,
    },
}
build/lib/mmgen/.mim/configs/biggan/biggan-deep_256x256_cvt_hugging-face_rgb.py
0 → 100644
View file @
1401de15
# BigGAN-Deep 256x256 ImageNet model converted from the hugging-face
# release; weights are stored in RGB order (hence rgb2bgr on the generator).
# 'BasiccGAN' is the mmgen registry name for the basic conditional GAN.
model = dict(
    type='BasiccGAN',
    generator=dict(
        type='BigGANDeepGenerator',
        output_scale=256,
        noise_size=128,
        num_classes=1000,
        base_channels=128,
        shared_dim=128,
        with_shared_embedding=True,
        sn_eps=1e-6,  # spectral-norm epsilon, matches the converted weights
        sn_style='torch',
        init_type='ortho',
        act_cfg=dict(type='ReLU', inplace=True),
        concat_noise=True,
        auto_sync_bn=False,
        rgb2bgr=True),
    discriminator=dict(
        type='BigGANDeepDiscriminator',
        input_scale=256,
        num_classes=1000,
        base_channels=128,
        sn_eps=1e-6,
        sn_style='torch',
        init_type='ortho',
        act_cfg=dict(type='ReLU', inplace=True),
        with_spectral_norm=True),
    gan_loss=dict(type='GANLoss', gan_type='hinge'))

# 8 D steps per G step with 8-step gradient accumulation; keep an EMA copy
# of the generator for sampling.
train_cfg = dict(
    disc_steps=8, gen_steps=1, batch_accumulation_steps=8, use_ema=True)
test_cfg = None

# BigGAN two-timescale Adam: D learns 4x faster than G.
optimizer = dict(
    generator=dict(type='Adam', lr=0.0001, betas=(0.0, 0.999), eps=1e-6),
    discriminator=dict(type='Adam', lr=0.0004, betas=(0.0, 0.999), eps=1e-6))
build/lib/mmgen/.mim/configs/biggan/biggan-deep_512x512_cvt_hugging-face_rgb.py
0 → 100644
View file @
1401de15
# BigGAN-Deep 512x512 ImageNet model converted from the hugging-face
# release; weights are stored in RGB order (hence rgb2bgr on the generator).
# 'BasiccGAN' is the mmgen registry name for the basic conditional GAN.
model = dict(
    type='BasiccGAN',
    generator=dict(
        type='BigGANDeepGenerator',
        output_scale=512,
        noise_size=128,
        num_classes=1000,
        base_channels=128,
        shared_dim=128,
        with_shared_embedding=True,
        sn_eps=1e-6,  # spectral-norm epsilon, matches the converted weights
        sn_style='torch',
        init_type='ortho',
        act_cfg=dict(type='ReLU', inplace=True),
        concat_noise=True,
        auto_sync_bn=False,
        rgb2bgr=True),
    discriminator=dict(
        type='BigGANDeepDiscriminator',
        input_scale=512,
        num_classes=1000,
        base_channels=128,
        sn_eps=1e-6,
        sn_style='torch',
        init_type='ortho',
        act_cfg=dict(type='ReLU', inplace=True),
        with_spectral_norm=True),
    gan_loss=dict(type='GANLoss', gan_type='hinge'))

# 8 D steps per G step with 8-step gradient accumulation; keep an EMA copy
# of the generator for sampling.
train_cfg = dict(
    disc_steps=8, gen_steps=1, batch_accumulation_steps=8, use_ema=True)
test_cfg = None

# BigGAN two-timescale Adam: D learns 4x faster than G.
optimizer = dict(
    generator=dict(type='Adam', lr=0.0001, betas=(0.0, 0.999), eps=1e-6),
    discriminator=dict(type='Adam', lr=0.0004, betas=(0.0, 0.999), eps=1e-6))
build/lib/mmgen/.mim/configs/biggan/biggan_128x128_cvt_BigGAN-PyTorch_rgb.py
0 → 100644
View file @
1401de15
# BigGAN (non-deep) 128x128 ImageNet model converted from ajbrock's
# BigGAN-PyTorch release; weights are stored in RGB order (rgb2bgr=True).
# Unlike the deep variants, noise is split per block (split_noise) and the
# base channel width is 96. 'BasiccGAN' is the mmgen registry name.
model = {
    'type': 'BasiccGAN',
    'generator': {
        'type': 'BigGANGenerator',
        'output_scale': 128,
        'noise_size': 120,
        'num_classes': 1000,
        'base_channels': 96,
        'shared_dim': 128,
        'with_shared_embedding': True,
        'sn_eps': 1e-6,
        'init_type': 'ortho',
        'act_cfg': {'type': 'ReLU', 'inplace': True},
        'split_noise': True,
        'auto_sync_bn': False,
        'rgb2bgr': True,
    },
    'discriminator': {
        'type': 'BigGANDiscriminator',
        'input_scale': 128,
        'num_classes': 1000,
        'base_channels': 96,
        'sn_eps': 1e-6,
        'init_type': 'ortho',
        'act_cfg': {'type': 'ReLU', 'inplace': True},
        'with_spectral_norm': True,
    },
    'gan_loss': {'type': 'GANLoss', 'gan_type': 'hinge'},
}

# 8 D steps per G step with 8-step gradient accumulation; keep an EMA copy
# of the generator for sampling.
train_cfg = {
    'disc_steps': 8,
    'gen_steps': 1,
    'batch_accumulation_steps': 8,
    'use_ema': True,
}
test_cfg = None

# BigGAN two-timescale Adam: D learns 4x faster than G.
optimizer = {
    'generator': {
        'type': 'Adam', 'lr': 0.0001, 'betas': (0.0, 0.999), 'eps': 1e-6,
    },
    'discriminator': {
        'type': 'Adam', 'lr': 0.0004, 'betas': (0.0, 0.999), 'eps': 1e-6,
    },
}
build/lib/mmgen/.mim/configs/biggan/biggan_ajbrock-sn_imagenet1k_128x128_b32x8_1500k.py
0 → 100644
View file @
1401de15
# Training config: BigGAN 128x128 on ImageNet-1k, batch 32 per GPU x 8
# GPUs, 1.5M iterations, ajbrock-style spectral norm from the base model.
_base_ = [
    '../_base_/models/biggan/biggan_128x128.py',
    '../_base_/datasets/imagenet_noaug_128.py', '../_base_/default_runtime.py'
]

# define dataset
# you must set `samples_per_gpu`
data = dict(samples_per_gpu=32, workers_per_gpu=8)

# adjust running config
lr_config = None
checkpoint_config = dict(interval=5000, by_epoch=False, max_keep_ckpts=10)
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=10000),
    # EMA starts late (160k iters) and updates every 8 iters, matching the
    # 8-step accumulation schedule in the base model config.
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interval=8,
        start_iter=160000,
        interp_cfg=dict(momentum=0.9999, momentum_nontrainable=0.9999),
        priority='VERY_HIGH')
]

# Training set's datasize 1,281,167
total_iters = 1500000

# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False

runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)

# Note set your inception_pkl's path
inception_pkl = 'work_dirs/inception_pkl/imagenet.pkl'
evaluation = dict(
    type='GenerativeEvalHook',
    interval=10000,
    metrics=[
        dict(
            type='FID',
            num_images=50000,
            inception_pkl=inception_pkl,
            bgr2rgb=True),
        dict(type='IS', num_images=50000)
    ],
    sample_kwargs=dict(sample_model='ema'),
    best_metric=['fid', 'is'])

# Offline evaluation metrics (FID with StyleGAN inception weights, IS).
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        bgr2rgb=True,
        inception_args=dict(type='StyleGAN')),
    is50k=dict(type='IS', num_images=50000))
build/lib/mmgen/.mim/configs/biggan/biggan_cifar10_32x32_b25x2_500k.py
0 → 100644
View file @
1401de15
# Training config: BigGAN 32x32 on CIFAR-10, batch 25 per GPU x 2 GPUs,
# 500k iterations.
_base_ = [
    '../_base_/models/biggan/biggan_32x32.py',
    '../_base_/datasets/cifar10_noaug.py', '../_base_/default_runtime.py'
]

# define dataset
# you must set `samples_per_gpu`
data = dict(samples_per_gpu=25, workers_per_gpu=8)

# adjust running config
lr_config = None
checkpoint_config = dict(interval=5000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    # Generator EMA: update every 4 iters starting at iter 4000.
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interval=4,
        start_iter=4000,
        interp_cfg=dict(momentum=0.9999),
        priority='VERY_HIGH')
]

total_iters = 500000

# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False

runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)

# Note set your inception_pkl's path
# None here: FID statistics are computed from the dataset on the fly.
inception_pkl = None
evaluation = dict(
    type='GenerativeEvalHook',
    interval=10000,
    metrics=[
        dict(
            type='FID',
            num_images=50000,
            inception_pkl=inception_pkl,
            bgr2rgb=True),
        dict(type='IS', num_images=50000)
    ],
    sample_kwargs=dict(sample_model='ema'),
    best_metric=['fid', 'is'])

# Offline evaluation metrics.
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        bgr2rgb=True),
    is50k=dict(type='IS', num_images=50000))
build/lib/mmgen/.mim/configs/biggan/biggan_torch-sn_imagenet1k_128x128_b32x8_1500k.py
0 → 100644
View file @
1401de15
# Training config: BigGAN 128x128 on ImageNet-1k, identical schedule to the
# ajbrock-sn variant but overriding both networks to torch-style spectral
# norm.
_base_ = [
    '../_base_/models/biggan/biggan_128x128.py',
    '../_base_/datasets/imagenet_noaug_128.py', '../_base_/default_runtime.py'
]

# define dataset
# you must set `samples_per_gpu`
data = dict(samples_per_gpu=32, workers_per_gpu=8)

# Override the base model: use PyTorch's spectral-norm implementation.
model = dict(
    generator=dict(sn_style='torch'), discriminator=dict(sn_style='torch'))

# adjust running config
lr_config = None
checkpoint_config = dict(interval=5000, by_epoch=False, max_keep_ckpts=10)
custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=10000),
    # EMA starts late (160k iters) and updates every 8 iters.
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interval=8,
        start_iter=160000,
        interp_cfg=dict(momentum=0.9999, momentum_nontrainable=0.9999),
        priority='VERY_HIGH')
]

# Training set's datasize 1,281,167
total_iters = 1500000

# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False

runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)

# Note set your inception_pkl's path
inception_pkl = 'work_dirs/inception_pkl/imagenet.pkl'
evaluation = dict(
    type='GenerativeEvalHook',
    interval=10000,
    metrics=[
        dict(
            type='FID',
            num_images=50000,
            inception_pkl=inception_pkl,
            bgr2rgb=True),
        dict(type='IS', num_images=50000)
    ],
    sample_kwargs=dict(sample_model='ema'),
    best_metric=['fid', 'is'])

# Offline evaluation metrics (FID with StyleGAN inception weights, IS).
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        bgr2rgb=True,
        inception_args=dict(type='StyleGAN')),
    is50k=dict(type='IS', num_images=50000))
build/lib/mmgen/.mim/configs/biggan/metafile.yml
0 → 100644
View file @
1401de15
# OpenMMLab model-index metafile for the BigGAN configs shipped with mmgen.
Collections:
- Metadata:
    Architecture:
    - BigGAN
  Name: BigGAN
  Paper:
  - https://openreview.net/forum?id=B1xsqj09Fm
  README: configs/biggan/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/blob/master/configs/biggan/biggan_cifar10_32x32_b25x2_500k.py
  In Collection: BigGAN
  Metadata:
    Training Data: CIFAR
  Name: biggan_cifar10_32x32_b25x2_500k
  Results:
  - Dataset: CIFAR
    Metrics:
      FID: 9.78
      IS: 8.7
    Task: Conditional GANs
  Weights: https://download.openmmlab.com/mmgen/biggan/biggan_cifar10_32x32_b25x2_500k_20210728_110906-08b61a44.pth
  # The next two entries share a Name: same config, two checkpoints
  # (best-FID iter 1232k vs best-IS iter 1328k, per the weight filenames).
- Config: https://github.com/open-mmlab/mmgeneration/blob/master/configs/biggan/biggan_ajbrock-sn_imagenet1k_128x128_b32x8_1500k.py
  In Collection: BigGAN
  Metadata:
    Training Data: IMAGENET
  Name: biggan_ajbrock-sn_imagenet1k_128x128_b32x8_1500k
  Results:
  - Dataset: IMAGENET
    Metrics:
      FID: 8.69
      IS: 101.15
    Task: Conditional GANs
  Weights: https://download.openmmlab.com/mmgen/biggan/biggan_imagenet1k_128x128_b32x8_best_fid_iter_1232000_20211111_122548-5315b13d.pth
- Config: https://github.com/open-mmlab/mmgeneration/blob/master/configs/biggan/biggan_ajbrock-sn_imagenet1k_128x128_b32x8_1500k.py
  In Collection: BigGAN
  Metadata:
    Training Data: IMAGENET
  Name: biggan_ajbrock-sn_imagenet1k_128x128_b32x8_1500k
  Results:
  - Dataset: IMAGENET
    Metrics:
      FID: 13.51
      IS: 129.07
    Task: Conditional GANs
  Weights: https://download.openmmlab.com/mmgen/biggan/biggan_imagenet1k_128x128_b32x8_best_is_iter_1328000_20211111_122911-28c688bc.pth
  # Converted (inference-only) checkpoints below.
- Config: https://github.com/open-mmlab/mmgeneration/blob/master/configs/_base_/models/biggan/biggan_128x128_cvt_BigGAN-PyTorch_rgb.py
  In Collection: BigGAN
  Metadata:
    Training Data: Others
  Name: biggan_128x128_cvt_BigGAN-PyTorch_rgb
  Results:
  - Dataset: Others
    Metrics:
      FID: 10.1414
      IS: 96.728
    Task: Conditional GANs
  Weights: https://download.openmmlab.com/mmgen/biggan/biggan_imagenet1k_128x128_cvt_BigGAN-PyTorch_rgb_20210730_125223-3e353fef.pth
- Config: https://github.com/open-mmlab/mmgeneration/blob/master/configs/_base_/models/biggan/biggan-deep_128x128_cvt_hugging-face_rgb.py
  In Collection: BigGAN
  Metadata:
    Training Data: Others
  Name: biggan-deep_128x128_cvt_hugging-face_rgb
  Results:
  - Dataset: Others
    Metrics:
      FID: 5.9471
      IS: 107.161
    Task: Conditional GANs
  Weights: https://download.openmmlab.com/mmgen/biggan/biggan-deep_imagenet1k_128x128_cvt_hugging-face_rgb_20210728_111659-099e96f9.pth
- Config: https://github.com/open-mmlab/mmgeneration/blob/master/configs/_base_/models/biggan/biggan-deep_256x256_cvt_hugging-face_rgb.py
  In Collection: BigGAN
  Metadata:
    Training Data: Others
  Name: biggan-deep_256x256_cvt_hugging-face_rgb
  Results:
  - Dataset: Others
    Metrics:
      FID: 11.3151
      IS: 135.107
    Task: Conditional GANs
  Weights: https://download.openmmlab.com/mmgen/biggan/biggan-deep_imagenet1k_256x256_cvt_hugging-face_rgb_20210728_111735-28651569.pth
- Config: https://github.com/open-mmlab/mmgeneration/blob/master/configs/_base_/models/biggan/biggan-deep_512x512_cvt_hugging-face_rgb.py
  In Collection: BigGAN
  Metadata:
    Training Data: Others
  Name: biggan-deep_512x512_cvt_hugging-face_rgb
  Results:
  - Dataset: Others
    Metrics:
      FID: 16.8728
      IS: 124.368
    Task: Conditional GANs
  Weights: https://download.openmmlab.com/mmgen/biggan/biggan-deep_imagenet1k_512x512_cvt_hugging-face_rgb_20210728_112346-a42585f2.pth
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_facades_b1x1_80k.py
0 → 100644
View file @
1401de15
# CycleGAN (LSGAN loss, ResNet generator, InstanceNorm) on unpaired facades,
# "id0" variant: no identity loss — only the two cycle-consistency L1 terms.
_base_ = [
    '../_base_/models/cyclegan/cyclegan_lsgan_resnet.py',
    '../_base_/datasets/unpaired_imgs_256x256.py',
    '../_base_/default_runtime.py'
]

train_cfg = dict(buffer_size=50)  # image buffer for discriminator updates
test_cfg = None

# Translation domains: semantic masks <-> building photos.
domain_a = 'mask'
domain_b = 'photo'
model = dict(
    default_domain=domain_b,
    reachable_domains=[domain_a, domain_b],
    related_domains=[domain_a, domain_b],
    # Cycle-consistency losses in both directions, weight 10.
    gen_auxiliary_loss=[
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_a}', target=f'real_{domain_a}'),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_b}',
                target=f'real_{domain_b}',
            ),
            reduction='mean')
    ])

# Train-time augmentation: resize to 286, random-crop to 256, random flips,
# then normalize to [-1, 1].
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(286, 286),
        interpolation='bicubic'),
    dict(
        type='Crop',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        crop_size=(256, 256),
        random_crop=True),
    dict(type='Flip', keys=[f'img_{domain_a}'], direction='horizontal'),
    dict(type='Flip', keys=[f'img_{domain_b}'], direction='horizontal'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]

dataroot = './data/unpaired_facades'
# Test-time pipeline: deterministic resize to 256, no crop/flip.
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(256, 256),
        interpolation='bicubic'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]

data = dict(
    train=dict(
        dataroot=dataroot,
        pipeline=train_pipeline,
        domain_a=domain_a,
        domain_b=domain_b),
    val=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline),
    test=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline))

optimizer = dict(
    generators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),
    discriminators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))

# learning policy: constant LR for the first 40k iters, then linear decay
# to 0 (stepped every 400 iters) over the remaining 40k.
lr_config = dict(
    policy='Linear', by_epoch=False, target_lr=0, start=40000, interval=400)

checkpoint_config = dict(interval=10000, save_optimizer=True, by_epoch=False)
custom_hooks = [
    dict(
        type='MMGenVisualizationHook',
        output_dir='training_samples',
        res_name_list=[f'fake_{domain_a}', f'fake_{domain_b}'],
        interval=5000)
]

runner = None
use_ddp_wrapper = True
total_iters = 80000
workflow = [('train', 1)]
exp_name = 'cyclegan_facades_id0'
work_dir = f'./work_dirs/experiments/{exp_name}'

# 106 test images in the unpaired facades split — TODO confirm against data.
num_images = 106
metrics = dict(
    FID=dict(type='FID', num_images=num_images, image_shape=(3, 256, 256)),
    IS=dict(
        type='IS',
        num_images=num_images,
        image_shape=(3, 256, 256),
        inception_args=dict(type='pytorch')))

evaluation = dict(
    type='TranslationEvalHook',
    target_domain=domain_b,
    interval=10000,
    metrics=[
        dict(type='FID', num_images=num_images, bgr2rgb=True),
        dict(
            type='IS',
            num_images=num_images,
            inception_args=dict(type='pytorch'))
    ],
    best_metric=['fid', 'is'])
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_horse2zebra_b1x1_270k.py
0 → 100644
View file @
1401de15
# CycleGAN (LSGAN loss, ResNet generator, InstanceNorm) on horse2zebra,
# "id0" variant: no identity loss — only the two cycle-consistency L1 terms.
_base_ = [
    '../_base_/models/cyclegan/cyclegan_lsgan_resnet.py',
    '../_base_/datasets/unpaired_imgs_256x256.py',
    '../_base_/default_runtime.py'
]

# Translation domains.
domain_a = 'horse'
domain_b = 'zebra'
model = dict(
    default_domain=domain_b,
    reachable_domains=[domain_a, domain_b],
    related_domains=[domain_a, domain_b],
    # Cycle-consistency losses in both directions, weight 10.
    gen_auxiliary_loss=[
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_a}', target=f'real_{domain_a}'),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_b}',
                target=f'real_{domain_b}',
            ),
            reduction='mean')
    ])

dataroot = './data/horse2zebra'
# Train-time augmentation: resize to 286, random-crop to 256, random flips,
# then normalize to [-1, 1].
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(286, 286),
        interpolation='bicubic'),
    dict(
        type='Crop',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        crop_size=(256, 256),
        random_crop=True),
    dict(type='Flip', keys=[f'img_{domain_a}'], direction='horizontal'),
    dict(type='Flip', keys=[f'img_{domain_b}'], direction='horizontal'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]

# Test-time pipeline: deterministic resize to 256, no crop/flip.
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(256, 256),
        interpolation='bicubic'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]

data = dict(
    train=dict(
        dataroot=dataroot,
        pipeline=train_pipeline,
        domain_a=domain_a,
        domain_b=domain_b),
    val=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline),
    test=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline))

optimizer = dict(
    generators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),
    discriminators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))

# learning policy: constant LR for the first 135k iters, then linear decay
# to 0 (stepped every 1350 iters) over the remaining 135k.
lr_config = dict(
    policy='Linear', by_epoch=False, target_lr=0, start=135000, interval=1350)

checkpoint_config = dict(interval=10000, save_optimizer=True, by_epoch=False)
custom_hooks = [
    dict(
        type='MMGenVisualizationHook',
        output_dir='training_samples',
        res_name_list=[f'fake_{domain_a}', f'fake_{domain_b}'],
        interval=5000)
]

runner = None
use_ddp_wrapper = True
total_iters = 270000
workflow = [('train', 1)]
exp_name = 'cyclegan_horse2zebra_id0'
work_dir = f'./work_dirs/experiments/{exp_name}'

# 140 test images in the horse2zebra split — TODO confirm against data.
num_images = 140
metrics = dict(
    FID=dict(type='FID', num_images=num_images, image_shape=(3, 256, 256)),
    IS=dict(
        type='IS',
        num_images=num_images,
        image_shape=(3, 256, 256),
        inception_args=dict(type='pytorch')))

evaluation = dict(
    type='TranslationEvalHook',
    target_domain=domain_b,
    interval=10000,
    metrics=[
        dict(type='FID', num_images=num_images, bgr2rgb=True),
        dict(
            type='IS',
            num_images=num_images,
            inception_args=dict(type='pytorch'))
    ],
    best_metric=['fid', 'is'])
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_summer2winter_b1x1_250k.py
0 → 100644
View file @
1401de15
# CycleGAN (LSGAN loss, ResNet generator) on summer<->winter Yosemite,
# 250k iterations, batch 1 on 1 GPU. The "_id0" variant: only the two
# cycle-consistency losses are configured — no identity losses
# (presumably "id0" = identity-loss weight 0; confirm against README).
_base_ = [
    '../_base_/models/cyclegan/cyclegan_lsgan_resnet.py',
    '../_base_/datasets/unpaired_imgs_256x256.py',
    '../_base_/default_runtime.py'
]
domain_a = 'summer'
domain_b = 'winter'
model = dict(
    default_domain=domain_b,
    reachable_domains=[domain_a, domain_b],
    related_domains=[domain_a, domain_b],
    # Two L1 cycle-consistency losses (one per direction), weight 10.0.
    # `data_info` maps the loss inputs to keys in the model's output dict.
    gen_auxiliary_loss=[
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_a}', target=f'real_{domain_a}'),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_b}',
                target=f'real_{domain_b}',
            ),
            reduction='mean')
    ])
dataroot = './data/summer2winter_yosemite'
# Training pipeline: load both domain images, resize to 286x286, random-crop
# back to 256x256, random horizontal flip per domain, then rescale/normalize
# to roughly [-1, 1] and collect tensors.
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(286, 286),
        interpolation='bicubic'),
    dict(
        type='Crop',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        crop_size=(256, 256),
        random_crop=True),
    # Flips are applied independently per domain (unpaired data).
    dict(type='Flip', keys=[f'img_{domain_a}'], direction='horizontal'),
    dict(type='Flip', keys=[f'img_{domain_b}'], direction='horizontal'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]
# Test pipeline: deterministic — resize straight to 256x256, no augmentation.
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(256, 256),
        interpolation='bicubic'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]
# Train uses the augmenting pipeline; val/test share the deterministic one.
data = dict(
    train=dict(
        dataroot=dataroot,
        pipeline=train_pipeline,
        domain_a=domain_a,
        domain_b=domain_b),
    val=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline),
    test=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline))
# Separate Adam optimizers for generators and discriminators.
optimizer = dict(
    generators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),
    discriminators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))
# learning policy
# Linear LR decay to 0 over the second half of training (starts at 125k of
# 250k iterations, updates every 1250 iterations).
lr_config = dict(
    policy='Linear', by_epoch=False, target_lr=0, start=125000, interval=1250)
checkpoint_config = dict(interval=10000, save_optimizer=True, by_epoch=False)
custom_hooks = [
    # Dump translated samples for both directions every 5k iterations.
    dict(
        type='MMGenVisualizationHook',
        output_dir='training_samples',
        res_name_list=[f'fake_{domain_a}', f'fake_{domain_b}'],
        interval=5000)
]
runner = None
use_ddp_wrapper = True
total_iters = 250000
workflow = [('train', 1)]
exp_name = 'cyclegan_summer2winter_id0'
work_dir = f'./work_dirs/experiments/{exp_name}'
# testA: 309, testB:238
num_images = 238
# Offline metrics: FID and IS over `num_images` translated 256x256 images.
metrics = dict(
    FID=dict(type='FID', num_images=num_images, image_shape=(3, 256, 256)),
    IS=dict(
        type='IS',
        num_images=num_images,
        image_shape=(3, 256, 256),
        inception_args=dict(type='pytorch')))
# Periodic evaluation during training on the summer->winter direction.
evaluation = dict(
    type='TranslationEvalHook',
    target_domain=domain_b,
    interval=10000,
    metrics=[
        dict(type='FID', num_images=num_images, bgr2rgb=True),
        dict(
            type='IS',
            num_images=num_images,
            inception_args=dict(type='pytorch'))
    ],
    best_metric=['fid', 'is'])
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_resnet_in_facades_b1x1_80k.py
0 → 100644
View file @
1401de15
# CycleGAN (LSGAN loss, ResNet generator) on unpaired facades (mask<->photo),
# 80k iterations, batch 1 on 1 GPU. Full variant: cycle-consistency losses
# plus identity losses.
_base_ = [
    '../_base_/models/cyclegan/cyclegan_lsgan_resnet.py',
    '../_base_/datasets/unpaired_imgs_256x256.py',
    '../_base_/default_runtime.py'
]
# Image buffer of 50 previously generated fakes for discriminator updates
# (exact semantics live in the model implementation, not visible here).
train_cfg = dict(buffer_size=50)
test_cfg = None
domain_a = 'mask'
domain_b = 'photo'
model = dict(
    default_domain=domain_b,
    reachable_domains=[domain_a, domain_b],
    related_domains=[domain_a, domain_b],
    # Four auxiliary L1 losses: two cycle-consistency losses (weight 10.0)
    # and two identity losses (weight 0.5). `data_info` maps loss inputs to
    # keys in the model's output dict.
    gen_auxiliary_loss=[
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_a}', target=f'real_{domain_a}'),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_b}',
                target=f'real_{domain_b}',
            ),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=0.5,
            loss_name='id_loss',
            data_info=dict(
                pred=f'identity_{domain_a}', target=f'real_{domain_a}'),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=0.5,
            loss_name='id_loss',
            data_info=dict(
                pred=f'identity_{domain_b}', target=f'real_{domain_b}'),
            reduction='mean')
    ])
dataroot = './data/unpaired_facades'
# Training pipeline: resize to 286x286, random-crop to 256x256, independent
# horizontal flips per domain, then rescale/normalize to roughly [-1, 1].
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(286, 286),
        interpolation='bicubic'),
    dict(
        type='Crop',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        crop_size=(256, 256),
        random_crop=True),
    dict(type='Flip', keys=[f'img_{domain_a}'], direction='horizontal'),
    dict(type='Flip', keys=[f'img_{domain_b}'], direction='horizontal'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]
# Test pipeline: deterministic resize to 256x256, no augmentation.
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(256, 256),
        interpolation='bicubic'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]
# Train uses the augmenting pipeline; val/test share the deterministic one.
data = dict(
    train=dict(
        dataroot=dataroot,
        pipeline=train_pipeline,
        domain_a=domain_a,
        domain_b=domain_b),
    val=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline),
    test=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline))
# Separate Adam optimizers for generators and discriminators.
optimizer = dict(
    generators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),
    discriminators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))
# learning policy
# Linear LR decay to 0 over the second half of training (starts at 40k of
# 80k iterations, updates every 400 iterations).
lr_config = dict(
    policy='Linear', by_epoch=False, target_lr=0, start=40000, interval=400)
checkpoint_config = dict(interval=10000, save_optimizer=True, by_epoch=False)
custom_hooks = [
    # Dump translated samples for both directions every 5k iterations.
    dict(
        type='MMGenVisualizationHook',
        output_dir='training_samples',
        res_name_list=[f'fake_{domain_a}', f'fake_{domain_b}'],
        interval=5000)
]
runner = None
use_ddp_wrapper = True
total_iters = 80000
workflow = [('train', 1)]
exp_name = 'cyclegan_facades'
work_dir = f'./work_dirs/experiments/{exp_name}'
num_images = 106
# Offline metrics: FID and IS over `num_images` translated 256x256 images.
metrics = dict(
    FID=dict(type='FID', num_images=num_images, image_shape=(3, 256, 256)),
    IS=dict(
        type='IS',
        num_images=num_images,
        image_shape=(3, 256, 256),
        inception_args=dict(type='pytorch')))
# Periodic evaluation during training on the mask->photo direction.
evaluation = dict(
    type='TranslationEvalHook',
    target_domain=domain_b,
    interval=10000,
    metrics=[
        dict(type='FID', num_images=num_images, bgr2rgb=True),
        dict(
            type='IS',
            num_images=num_images,
            inception_args=dict(type='pytorch'))
    ],
    best_metric=['fid', 'is'])
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_resnet_in_horse2zebra_b1x1_270k.py
0 → 100644
View file @
1401de15
# CycleGAN (LSGAN loss, ResNet generator) on horse<->zebra, 270k iterations,
# batch 1 on 1 GPU. Full variant: cycle-consistency plus identity losses.
_base_ = [
    '../_base_/models/cyclegan/cyclegan_lsgan_resnet.py',
    '../_base_/datasets/unpaired_imgs_256x256.py',
    '../_base_/default_runtime.py'
]
domain_a = 'horse'
domain_b = 'zebra'
model = dict(
    default_domain=domain_b,
    reachable_domains=[domain_a, domain_b],
    related_domains=[domain_a, domain_b],
    # Four auxiliary L1 losses: two cycle-consistency losses (weight 10.0)
    # and two identity losses (weight 0.5). `data_info` maps loss inputs to
    # keys in the model's output dict.
    gen_auxiliary_loss=[
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_a}', target=f'real_{domain_a}'),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_b}',
                target=f'real_{domain_b}',
            ),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=0.5,
            loss_name='id_loss',
            data_info=dict(
                pred=f'identity_{domain_a}', target=f'real_{domain_a}'),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=0.5,
            loss_name='id_loss',
            data_info=dict(
                pred=f'identity_{domain_b}', target=f'real_{domain_b}'),
            reduction='mean')
    ])
dataroot = './data/horse2zebra'
# Training pipeline: resize to 286x286, random-crop to 256x256, independent
# horizontal flips per domain, then rescale/normalize to roughly [-1, 1].
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(286, 286),
        interpolation='bicubic'),
    dict(
        type='Crop',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        crop_size=(256, 256),
        random_crop=True),
    dict(type='Flip', keys=[f'img_{domain_a}'], direction='horizontal'),
    dict(type='Flip', keys=[f'img_{domain_b}'], direction='horizontal'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]
# Test pipeline: deterministic resize to 256x256, no augmentation.
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(256, 256),
        interpolation='bicubic'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]
# Train uses the augmenting pipeline; val/test share the deterministic one.
data = dict(
    train=dict(
        dataroot=dataroot,
        pipeline=train_pipeline,
        domain_a=domain_a,
        domain_b=domain_b),
    val=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline),
    test=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline))
# Separate Adam optimizers for generators and discriminators.
optimizer = dict(
    generators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),
    discriminators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))
# learning policy
# Linear LR decay to 0 over the second half of training (starts at 135k of
# 270k iterations, updates every 1350 iterations).
lr_config = dict(
    policy='Linear', by_epoch=False, target_lr=0, start=135000, interval=1350)
checkpoint_config = dict(interval=10000, save_optimizer=True, by_epoch=False)
custom_hooks = [
    # Dump translated samples for both directions every 5k iterations.
    dict(
        type='MMGenVisualizationHook',
        output_dir='training_samples',
        res_name_list=[f'fake_{domain_a}', f'fake_{domain_b}'],
        interval=5000)
]
runner = None
use_ddp_wrapper = True
total_iters = 270000
workflow = [('train', 1)]
exp_name = 'cyclegan_horse2zebra'
work_dir = f'./work_dirs/experiments/{exp_name}'
# testA 120, testB 140
num_images = 140
# Offline metrics: FID and IS over `num_images` translated 256x256 images.
metrics = dict(
    FID=dict(type='FID', num_images=num_images, image_shape=(3, 256, 256)),
    IS=dict(
        type='IS',
        num_images=num_images,
        image_shape=(3, 256, 256),
        inception_args=dict(type='pytorch')))
# Periodic evaluation during training on the horse->zebra direction.
evaluation = dict(
    type='TranslationEvalHook',
    target_domain=domain_b,
    interval=10000,
    metrics=[
        dict(type='FID', num_images=num_images, bgr2rgb=True),
        dict(
            type='IS',
            num_images=num_images,
            inception_args=dict(type='pytorch'))
    ],
    best_metric=['fid', 'is'])
build/lib/mmgen/.mim/configs/cyclegan/cyclegan_lsgan_resnet_in_summer2winter_b1x1_250k.py
0 → 100644
View file @
1401de15
# CycleGAN (LSGAN loss, ResNet generator) on summer<->winter Yosemite,
# 250k iterations, batch 1 on 1 GPU. Full variant: cycle-consistency plus
# identity losses.
_base_ = [
    '../_base_/models/cyclegan/cyclegan_lsgan_resnet.py',
    '../_base_/datasets/unpaired_imgs_256x256.py',
    '../_base_/default_runtime.py'
]
domain_a = 'summer'
domain_b = 'winter'
model = dict(
    default_domain=domain_b,
    reachable_domains=[domain_a, domain_b],
    related_domains=[domain_a, domain_b],
    # Four auxiliary L1 losses: two cycle-consistency losses (weight 10.0)
    # and two identity losses (weight 0.5). `data_info` maps loss inputs to
    # keys in the model's output dict.
    gen_auxiliary_loss=[
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_a}', target=f'real_{domain_a}'),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=10.0,
            loss_name='cycle_loss',
            data_info=dict(
                pred=f'cycle_{domain_b}',
                target=f'real_{domain_b}',
            ),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=0.5,
            loss_name='id_loss',
            data_info=dict(
                pred=f'identity_{domain_a}', target=f'real_{domain_a}'),
            reduction='mean'),
        dict(
            type='L1Loss',
            loss_weight=0.5,
            loss_name='id_loss',
            data_info=dict(
                pred=f'identity_{domain_b}', target=f'real_{domain_b}'),
            reduction='mean')
    ])
dataroot = './data/summer2winter_yosemite'
# Training pipeline: resize to 286x286, random-crop to 256x256, independent
# horizontal flips per domain, then rescale/normalize to roughly [-1, 1].
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(286, 286),
        interpolation='bicubic'),
    dict(
        type='Crop',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        crop_size=(256, 256),
        random_crop=True),
    dict(type='Flip', keys=[f'img_{domain_a}'], direction='horizontal'),
    dict(type='Flip', keys=[f'img_{domain_b}'], direction='horizontal'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]
# Test pipeline: deterministic resize to 256x256, no augmentation.
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_a}',
        flag='color'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key=f'img_{domain_b}',
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(256, 256),
        interpolation='bicubic'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]
# Train uses the augmenting pipeline; val/test share the deterministic one.
data = dict(
    train=dict(
        dataroot=dataroot,
        pipeline=train_pipeline,
        domain_a=domain_a,
        domain_b=domain_b),
    val=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline),
    test=dict(
        dataroot=dataroot,
        domain_a=domain_a,
        domain_b=domain_b,
        pipeline=test_pipeline))
# Separate Adam optimizers for generators and discriminators.
optimizer = dict(
    generators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),
    discriminators=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))
# learning policy
# Linear LR decay to 0 over the second half of training (starts at 125k of
# 250k iterations, updates every 1250 iterations).
lr_config = dict(
    policy='Linear', by_epoch=False, target_lr=0, start=125000, interval=1250)
checkpoint_config = dict(interval=10000, save_optimizer=True, by_epoch=False)
custom_hooks = [
    # Dump translated samples for both directions every 5k iterations.
    dict(
        type='MMGenVisualizationHook',
        output_dir='training_samples',
        res_name_list=[f'fake_{domain_a}', f'fake_{domain_b}'],
        interval=5000)
]
runner = None
use_ddp_wrapper = True
total_iters = 250000
workflow = [('train', 1)]
exp_name = 'cyclegan_summer2winter'
work_dir = f'./work_dirs/experiments/{exp_name}'
# testA: 309, testB:238
num_images = 238
# Offline metrics: FID and IS over `num_images` translated 256x256 images.
metrics = dict(
    FID=dict(type='FID', num_images=num_images, image_shape=(3, 256, 256)),
    IS=dict(
        type='IS',
        num_images=num_images,
        image_shape=(3, 256, 256),
        inception_args=dict(type='pytorch')))
# Periodic evaluation during training on the summer->winter direction.
evaluation = dict(
    type='TranslationEvalHook',
    target_domain=domain_b,
    interval=10000,
    metrics=[
        dict(type='FID', num_images=num_images, bgr2rgb=True),
        dict(
            type='IS',
            num_images=num_images,
            inception_args=dict(type='pytorch'))
    ],
    best_metric=['fid', 'is'])
build/lib/mmgen/.mim/configs/cyclegan/metafile.yml
0 → 100644
View file @
1401de15
# Model-index metafile for the CycleGAN configs: one collection plus one
# entry per released checkpoint, with its config URL, training data,
# reported metrics (FID / IS) and weight download URL.
# NOTE(review): several checkpoints appear twice with different metrics
# (e.g. the summer2winter and horse2zebra weights) — presumably the two
# translation directions reported separately; confirm against the README.
Collections:
- Metadata:
    Architecture:
    - 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Name: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Paper:
  - https://openaccess.thecvf.com/content_iccv_2017/html/Zhu_Unpaired_Image-To-Image_Translation_ICCV_2017_paper.html
  README: configs/cyclegan/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_resnet_in_facades_b1x1_80k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: FACADES
  Name: cyclegan_lsgan_resnet_in_facades_b1x1_80k
  Results:
  - Dataset: FACADES
    Metrics:
      FID: 124.8033
      IS: 1.792
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_resnet_in_1x1_80k_facades_20210902_165905-5e2c0876.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_facades_b1x1_80k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: FACADES
  Name: cyclegan_lsgan_id0_resnet_in_facades_b1x1_80k
  Results:
  - Dataset: FACADES
    Metrics:
      FID: 125.1694
      IS: 1.905
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_id0_resnet_in_1x1_80k_facades_convert-bgr_20210902_164411-d8e72b45.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_resnet_in_summer2winter_b1x1_250k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: SUMMER2WINTER
  Name: cyclegan_lsgan_resnet_in_summer2winter_b1x1_250k
  Results:
  - Dataset: SUMMER2WINTER
    Metrics:
      FID: 83.7177
      IS: 2.771
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_resnet_in_1x1_246200_summer2winter_convert-bgr_20210902_165932-fcf08dc1.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_summer2winter_b1x1_250k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: SUMMER2WINTER
  Name: cyclegan_lsgan_id0_resnet_in_summer2winter_b1x1_250k
  Results:
  - Dataset: SUMMER2WINTER
    Metrics:
      FID: 83.1418
      IS: 2.72
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_id0_resnet_in_1x1_246200_summer2winter_convert-bgr_20210902_165640-8b825581.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_resnet_in_summer2winter_b1x1_250k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: SUMMER2WINTER
  Name: cyclegan_lsgan_resnet_in_summer2winter_b1x1_250k
  Results:
  - Dataset: SUMMER2WINTER
    Metrics:
      FID: 72.8025
      IS: 3.129
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_resnet_in_1x1_246200_summer2winter_convert-bgr_20210902_165932-fcf08dc1.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_summer2winter_b1x1_250k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: SUMMER2WINTER
  Name: cyclegan_lsgan_id0_resnet_in_summer2winter_b1x1_250k
  Results:
  - Dataset: SUMMER2WINTER
    Metrics:
      FID: 73.5001
      IS: 3.107
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_id0_resnet_in_1x1_246200_summer2winter_convert-bgr_20210902_165640-8b825581.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_resnet_in_horse2zebra_b1x1_270k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: HORSE2ZEBRA
  Name: cyclegan_lsgan_resnet_in_horse2zebra_b1x1_270k
  Results:
  - Dataset: HORSE2ZEBRA
    Metrics:
      FID: 64.5225
      IS: 1.418
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_resnet_in_1x1_266800_horse2zebra_convert-bgr_20210902_170004-a32c733a.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_horse2zebra_b1x1_270k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: HORSE2ZEBRA
  Name: cyclegan_lsgan_id0_resnet_in_horse2zebra_b1x1_270k
  Results:
  - Dataset: HORSE2ZEBRA
    Metrics:
      FID: 74.777
      IS: 1.542
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_id0_resnet_in_1x1_266800_horse2zebra_convert-bgr_20210902_165724-77c9c806.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_resnet_in_horse2zebra_b1x1_270k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: HORSE2ZEBRA
  Name: cyclegan_lsgan_resnet_in_horse2zebra_b1x1_270k
  Results:
  - Dataset: HORSE2ZEBRA
    Metrics:
      FID: 141.1517
      IS: 3.154
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_resnet_in_1x1_266800_horse2zebra_convert-bgr_20210902_170004-a32c733a.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/cyclegan/cyclegan_lsgan_id0_resnet_in_horse2zebra_b1x1_270k.py
  In Collection: 'CycleGAN: Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks'
  Metadata:
    Training Data: HORSE2ZEBRA
  Name: cyclegan_lsgan_id0_resnet_in_horse2zebra_b1x1_270k
  Results:
  - Dataset: HORSE2ZEBRA
    Metrics:
      FID: 134.3728
      IS: 3.091
    Task: Image2Image Translation
  Weights: https://download.openmmlab.com/mmgen/cyclegan/refactor/cyclegan_lsgan_id0_resnet_in_1x1_266800_horse2zebra_convert-bgr_20210902_165724-77c9c806.pth
build/lib/mmgen/.mim/configs/dcgan/dcgan_celeba-cropped_64_b128x1_300k.py
0 → 100644
View file @
1401de15
# DCGAN on CelebA-Cropped at 64x64, batch 128 on 1 GPU, ~300k iterations.
_base_ = [
    '../_base_/models/dcgan/dcgan_64x64.py',
    '../_base_/datasets/unconditional_imgs_64x64.py',
    '../_base_/default_runtime.py'
]
# define dataset
# you must set `samples_per_gpu` and `imgs_root`
data = dict(
    samples_per_gpu=128,
    train=dict(imgs_root='data/celeba-cropped/cropped_images_aligned_png'))
# adjust running config
# No LR schedule: constant learning rate from the base optimizer config.
lr_config = None
# Checkpoint every 10k iterations, keep at most the 20 most recent.
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
    # Dump unconditional samples every 10k iterations.
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=10000)
]
total_iters = 300002
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
    type='DynamicIterBasedRunner',
    is_dynamic_ddp=False,  # Note that this flag should be False.
    pass_training_status=True)
# Evaluation metrics: MS-SSIM over 10k samples and SWD over 16384 samples
# at 64x64 resolution.
metrics = dict(
    ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
    swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 64, 64)))
Prev
1
2
3
4
5
6
7
8
…
24
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment