Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
stylegan2_mmcv
Commits
1401de15
Commit
1401de15
authored
Jun 28, 2024
by
dongchy920
Browse files
stylegan2_mmcv
parents
Pipeline
#1274
canceled with stages
Changes
463
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
971 additions
and
0 deletions
+971
-0
build/lib/mmgen/.mim/configs/styleganv2/stylegan2_c2_lsun-horse_256_b4x8_800k.py
...nfigs/styleganv2/stylegan2_c2_lsun-horse_256_b4x8_800k.py
+45
-0
build/lib/mmgen/.mim/configs/styleganv3/metafile.yml
build/lib/mmgen/.mim/configs/styleganv3/metafile.yml
+132
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_r_ada_fp16_gamma3.3_metfaces_1024_b4x8.py
...ganv3/stylegan3_r_ada_fp16_gamma3.3_metfaces_1024_b4x8.py
+99
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_r_afhqv2_512_b4x8_cvt_official_rgb.py
...tyleganv3/stylegan3_r_afhqv2_512_b4x8_cvt_official_rgb.py
+21
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_r_ffhq_1024_b4x8_cvt_official_rgb.py
...styleganv3/stylegan3_r_ffhq_1024_b4x8_cvt_official_rgb.py
+22
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_r_ffhqu_256_b4x8_cvt_official_rgb.py
...styleganv3/stylegan3_r_ffhqu_256_b4x8_cvt_official_rgb.py
+18
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8.py
...ganv3/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8.py
+96
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_afhqv2_512_b4x8_cvt_official_rgb.py
...tyleganv3/stylegan3_t_afhqv2_512_b4x8_cvt_official_rgb.py
+16
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_ffhq_1024_b4x8_cvt_official_rgb.py
...styleganv3/stylegan3_t_ffhq_1024_b4x8_cvt_official_rgb.py
+17
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_ffhqu_256_b4x8_cvt_official_rgb.py
...styleganv3/stylegan3_t_ffhqu_256_b4x8_cvt_official_rgb.py
+16
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_noaug_fp16_gamma2.0_ffhq_256_b4x8.py
...yleganv3/stylegan3_t_noaug_fp16_gamma2.0_ffhq_256_b4x8.py
+72
-0
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_noaug_fp16_gamma32.8_ffhq_1024_b4x8.py
...eganv3/stylegan3_t_noaug_fp16_gamma32.8_ffhq_1024_b4x8.py
+73
-0
build/lib/mmgen/.mim/configs/wgan-gp/metafile.yml
build/lib/mmgen/.mim/configs/wgan-gp/metafile.yml
+35
-0
build/lib/mmgen/.mim/configs/wgan-gp/wgangp_GN_GP-50_lsun-bedroom_128_b64x1_160kiter.py
...gan-gp/wgangp_GN_GP-50_lsun-bedroom_128_b64x1_160kiter.py
+14
-0
build/lib/mmgen/.mim/configs/wgan-gp/wgangp_GN_celeba-cropped_128_b64x1_160kiter.py
...gs/wgan-gp/wgangp_GN_celeba-cropped_128_b64x1_160kiter.py
+25
-0
build/lib/mmgen/.mim/model-index.yml
build/lib/mmgen/.mim/model-index.yml
+18
-0
build/lib/mmgen/.mim/tools/deployment/mmgen2torchserver.py
build/lib/mmgen/.mim/tools/deployment/mmgen2torchserver.py
+114
-0
build/lib/mmgen/.mim/tools/deployment/mmgen_unconditional_handler.py
...mgen/.mim/tools/deployment/mmgen_unconditional_handler.py
+57
-0
build/lib/mmgen/.mim/tools/deployment/test_torchserver.py
build/lib/mmgen/.mim/tools/deployment/test_torchserver.py
+59
-0
build/lib/mmgen/.mim/tools/dist_eval.sh
build/lib/mmgen/.mim/tools/dist_eval.sh
+22
-0
No files found.
Too many changes to show.
To preserve performance only
463 of 463+
files are displayed.
Plain diff
Email patch
build/lib/mmgen/.mim/configs/styleganv2/stylegan2_c2_lsun-horse_256_b4x8_800k.py
0 → 100644
View file @
1401de15
"""Note that this config is just for testing."""
_base_
=
[
'../_base_/datasets/lsun_stylegan.py'
,
'../_base_/models/stylegan/stylegan2_base.py'
,
'../_base_/default_runtime.py'
]
model
=
dict
(
generator
=
dict
(
out_size
=
256
),
discriminator
=
dict
(
in_size
=
256
))
data
=
dict
(
samples_per_gpu
=
4
,
train
=
dict
(
dataset
=
dict
(
imgs_root
=
'./data/lsun-horse'
)))
ema_half_life
=
10.
# G_smoothing_kimg
custom_hooks
=
[
dict
(
type
=
'VisualizeUnconditionalSamples'
,
output_dir
=
'training_samples'
,
interval
=
5000
),
dict
(
type
=
'ExponentialMovingAverageHook'
,
module_keys
=
(
'generator_ema'
,
),
interval
=
1
,
interp_cfg
=
dict
(
momentum
=
0.5
**
(
32.
/
(
ema_half_life
*
1000.
))),
priority
=
'VERY_HIGH'
)
]
checkpoint_config
=
dict
(
interval
=
10000
,
by_epoch
=
False
,
max_keep_ckpts
=
30
)
lr_config
=
None
log_config
=
dict
(
interval
=
100
,
hooks
=
[
dict
(
type
=
'TextLoggerHook'
),
# dict(type='TensorboardLoggerHook'),
])
total_iters
=
800002
# need to modify
metrics
=
dict
(
fid50k
=
dict
(
type
=
'FID'
,
num_images
=
50000
,
inception_pkl
=
None
,
bgr2rgb
=
True
),
pr50k3
=
dict
(
type
=
'PR'
,
num_images
=
50000
,
k
=
3
),
ppl_wend
=
dict
(
type
=
'PPL'
,
space
=
'W'
,
sampling
=
'end'
,
num_images
=
50000
))
build/lib/mmgen/.mim/configs/styleganv3/metafile.yml
0 → 100644
View file @
1401de15
# Metafile for the StyleGANv3 configs (model-index format).
# NOTE(review): the 'Log' values below were truncated to '[log]' by the page
# export — the originals presumably contained full log URLs; verify upstream.
Collections:
- Metadata:
    Architecture:
    - StyleGANv3
  Name: StyleGANv3
  Paper:
  - https://nvlabs-fi-cdn.nvidia.com/stylegan3/stylegan3-paper.pdf
  README: configs/styleganv3/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv3/stylegan3_t_noaug_fp16_gamma32.8_ffhq_1024_b4x8.py
  In Collection: StyleGANv3
  Metadata:
    Training Data: FFHQ
  Name: stylegan3_t_noaug_fp16_gamma32.8_ffhq_1024_b4x8
  Results:
  - Dataset: FFHQ
    Metrics:
      FID50k: 3.37<sup>\</sup>
    Iter: 490000.0
    Log: '[log]'
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_t_noaug_fp16_gamma32.8_ffhq_1024_b4x8_best_fid_iter_490000_20220401_120733-4ff83434.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv3/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8.py
  In Collection: StyleGANv3
  Metadata:
    Training Data: Others
  Name: stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8
  Results:
  - Dataset: Others
    Metrics:
      FID50k: 15.09
    Iter: 130000.0
    Log: '[log]'
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8_best_fid_iter_130000_20220401_115101-f2ef498e.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/styleganv3/stylegan3_t_noaug_fp16_gamma2.0_ffhq_256_b4x8.py
  In Collection: StyleGANv3
  Metadata:
    Training Data: FFHQ
  Name: stylegan3_t_noaug_fp16_gamma2.0_ffhq_256_b4x8
  Results:
  - Dataset: FFHQ
    Metrics:
      FID50k: 7.65
    Iter: 740000.0
    Log: '[log]'
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_t_noaug_fp16_gamma2.0_ffhq_256_b4x8_best_fid_iter_740000_20220401_122456-730e1fba.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/_base_/models/stylegan/stylegan3_t_ffhqu_256_b4x8_cvt_official_rgb.py
  In Collection: StyleGANv3
  Metadata:
    Training Data: FFHQ
  Name: stylegan3_t_ffhqu_256_b4x8_cvt_official_rgb
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: official weight
      EQ-R: 13.12
      EQ-T: 63.01
      FID50k: 4.62
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_t_ffhqu_256_b4x8_cvt_official_rgb_20220329_235046-153df4c8.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/_base_/models/stylegan/stylegan3_t_afhqv2_512_b4x8_cvt_official_rgb.py
  In Collection: StyleGANv3
  Metadata:
    Training Data: Others
  Name: stylegan3_t_afhqv2_512_b4x8_cvt_official_rgb
  Results:
  - Dataset: Others
    Metrics:
      Comment: official weight
      EQ-R: 13.51
      EQ-T: 60.15
      FID50k: 4.04
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_t_afhqv2_512_b4x8_cvt_official_rgb_20220329_235017-ee6b037a.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/_base_/models/stylegan/stylegan3_t_ffhq_1024_b4x8_cvt_official_rgb.py
  In Collection: StyleGANv3
  Metadata:
    Training Data: FFHQ
  Name: stylegan3_t_ffhq_1024_b4x8_cvt_official_rgb
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: official weight
      EQ-R: 13.82
      EQ-T: 61.21
      FID50k: 2.79
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_t_ffhq_1024_b4x8_cvt_official_rgb_20220329_235113-db6c6580.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/_base_/models/stylegan/stylegan3_r_ffhqu_256_b4x8_cvt_official_rgb.py
  In Collection: StyleGANv3
  Metadata:
    Training Data: FFHQ
  Name: stylegan3_r_ffhqu_256_b4x8_cvt_official_rgb
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: official weight
      EQ-R: 40.48
      EQ-T: 66.65
      FID50k: 4.5
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_r_ffhqu_256_b4x8_cvt_official_rgb_20220329_234909-4521d963.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/_base_/models/stylegan/stylegan3_r_afhqv2_512_b4x8_cvt_official_rgb.py
  In Collection: StyleGANv3
  Metadata:
    Training Data: Others
  Name: stylegan3_r_afhqv2_512_b4x8_cvt_official_rgb
  Results:
  - Dataset: Others
    Metrics:
      Comment: official weight
      EQ-R: 40.34
      EQ-T: 64.89
      FID50k: 4.4
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_r_afhqv2_512_b4x8_cvt_official_rgb_20220329_234829-f2eaca72.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/_base_/models/stylegan/stylegan3_r_ffhq_1024_b4x8_cvt_official_rgb.py
  In Collection: StyleGANv3
  Metadata:
    Training Data: FFHQ
  Name: stylegan3_r_ffhq_1024_b4x8_cvt_official_rgb
  Results:
  - Dataset: FFHQ
    Metrics:
      Comment: official weight
      EQ-R: 46.62
      EQ-T: 64.76
      FID50k: 3.07
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/stylegan3/stylegan3_r_ffhq_1024_b4x8_cvt_official_rgb_20220329_234933-ac0500a1.pth
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_r_ada_fp16_gamma3.3_metfaces_1024_b4x8.py
0 → 100644
View file @
1401de15
_base_ = [
    '../_base_/models/stylegan/stylegan3_base.py',
    '../_base_/datasets/ffhq_flip.py', '../_base_/default_runtime.py'
]

# StyleGAN3-R synthesis network (1x1 convs + radial filters).
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 65536,
    'channel_max': 1024,
    'magnitude_ema_beta': 0.999,
    'conv_kernel': 1,
    'use_radial_filters': True
}
r1_gamma = 3.3  # set by user
d_reg_interval = 16

load_from = 'https://download.openmmlab.com/mmgen/stylegan3/stylegan3_r_ffhq_1024_b4x8_cvt_official_rgb_20220329_234933-ac0500a1.pth'  # noqa

# ada settings
aug_kwargs = {
    'xflip': 1,
    'rotate90': 1,
    'xint': 1,
    'scale': 1,
    'rotate': 1,
    'aniso': 1,
    'xfrac': 1,
    'brightness': 1,
    'contrast': 1,
    'lumaflip': 1,
    'hue': 1,
    'saturation': 1
}

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        out_size=1024,
        img_channels=3,
        rgb2bgr=True,
        synthesis_cfg=synthesis_cfg),
    discriminator=dict(
        type='ADAStyleGAN2Discriminator',
        in_size=1024,
        input_bgr2rgb=True,
        data_aug=dict(type='ADAAug', aug_pipeline=aug_kwargs, ada_kimg=100)),
    gan_loss=dict(type='GANLoss', gan_type='wgan-logistic-ns'),
    # lazy R1 regularization: scale the weight by the regularization interval
    disc_auxiliary_loss=dict(loss_weight=r1_gamma / 2.0 * d_reg_interval))

imgs_root = 'data/metfaces/images/'
data = dict(
    samples_per_gpu=4,
    train=dict(dataset=dict(imgs_root=imgs_root)),
    val=dict(imgs_root=imgs_root))

ema_half_life = 10.  # G_smoothing_kimg
ema_kimg = 10
ema_nimg = ema_kimg * 1000
ema_beta = 0.5**(32 / max(ema_nimg, 1e-8))

custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interp_mode='lerp',
        interp_cfg=dict(momentum=ema_beta),
        interval=1,
        start_iter=0,
        priority='VERY_HIGH')
]

inception_pkl = 'work_dirs/inception_pkl/metface_1024x1024_noflip.pkl'
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True))

# Evaluate every 10k iters, then every 5k iters after 100k.
evaluation = dict(
    type='GenerativeEvalHook',
    interval=dict(milestones=[100000], interval=[10000, 5000]),
    metrics=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True),
    sample_kwargs=dict(sample_model='ema'))

lr_config = None
total_iters = 160000
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_r_afhqv2_512_b4x8_cvt_official_rgb.py
0 → 100644
View file @
1401de15
_base_ = ['./stylegan3_base.py']

# StyleGAN3-R synthesis settings (1x1 convs + radial filters).
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 65536,
    'channel_max': 1024,
    'magnitude_ema_beta': 0.999,
    'conv_kernel': 1,
    'use_radial_filters': True
}

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        type='StyleGANv3Generator',
        noise_size=512,
        style_channels=512,
        out_size=512,
        img_channels=3,
        rgb2bgr=True,
        synthesis_cfg=synthesis_cfg),
    discriminator=dict(type='StyleGAN2Discriminator', in_size=512))
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_r_ffhq_1024_b4x8_cvt_official_rgb.py
0 → 100644
View file @
1401de15
_base_ = ['./stylegan3_base.py']

# StyleGAN3-R synthesis settings (1x1 convs + radial filters).
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 65536,
    'channel_max': 1024,
    'magnitude_ema_beta': 0.999,
    'conv_kernel': 1,
    'use_radial_filters': True
}

# NOTE(review): r1_gamma / d_reg_interval are defined but not referenced in
# this inference-only (converted official weight) config — kept for parity
# with the training configs.
r1_gamma = 32.8
d_reg_interval = 16

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        out_size=1024,
        img_channels=3,
        synthesis_cfg=synthesis_cfg,
        rgb2bgr=True),
    discriminator=dict(type='StyleGAN2Discriminator', in_size=1024))
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_r_ffhqu_256_b4x8_cvt_official_rgb.py
0 → 100644
View file @
1401de15
_base_ = ['./stylegan3_base.py']

# StyleGAN3-R at 256x256: halved channel_base, radial filters on.
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 32768,
    'channel_max': 1024,
    'magnitude_ema_beta': 0.999,
    'conv_kernel': 1,
    'use_radial_filters': True
}

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        out_size=256,
        img_channels=3,
        rgb2bgr=True,
        synthesis_cfg=synthesis_cfg),
    discriminator=dict(in_size=256, channel_multiplier=1))
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_ada_fp16_gamma6.6_metfaces_1024_b4x8.py
0 → 100644
View file @
1401de15
_base_ = [
    '../_base_/models/stylegan/stylegan3_base.py',
    '../_base_/datasets/ffhq_flip.py', '../_base_/default_runtime.py'
]

# StyleGAN3-T synthesis network.
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 32768,
    'channel_max': 512,
    'magnitude_ema_beta': 0.999
}
r1_gamma = 6.6  # set by user
d_reg_interval = 16

load_from = 'https://download.openmmlab.com/mmgen/stylegan3/stylegan3_t_ffhq_1024_b4x8_cvt_official_rgb_20220329_235113-db6c6580.pth'  # noqa

# ada settings
aug_kwargs = {
    'xflip': 1,
    'rotate90': 1,
    'xint': 1,
    'scale': 1,
    'rotate': 1,
    'aniso': 1,
    'xfrac': 1,
    'brightness': 1,
    'contrast': 1,
    'lumaflip': 1,
    'hue': 1,
    'saturation': 1
}

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        out_size=1024,
        img_channels=3,
        rgb2bgr=True,
        synthesis_cfg=synthesis_cfg),
    discriminator=dict(
        type='ADAStyleGAN2Discriminator',
        in_size=1024,
        input_bgr2rgb=True,
        data_aug=dict(type='ADAAug', aug_pipeline=aug_kwargs, ada_kimg=100)),
    gan_loss=dict(type='GANLoss', gan_type='wgan-logistic-ns'),
    # lazy R1 regularization: scale the weight by the regularization interval
    disc_auxiliary_loss=dict(loss_weight=r1_gamma / 2.0 * d_reg_interval))

imgs_root = 'data/metfaces/images/'
data = dict(
    samples_per_gpu=4,
    train=dict(dataset=dict(imgs_root=imgs_root)),
    val=dict(imgs_root=imgs_root))

ema_half_life = 10.  # G_smoothing_kimg
ema_kimg = 10
ema_nimg = ema_kimg * 1000
ema_beta = 0.5**(32 / max(ema_nimg, 1e-8))

custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interp_mode='lerp',
        interp_cfg=dict(momentum=ema_beta),
        interval=1,
        start_iter=0,
        priority='VERY_HIGH')
]

inception_pkl = 'work_dirs/inception_pkl/metface_1024x1024_noflip.pkl'
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True))

# Evaluate every 10k iters, then every 5k iters after 80k.
evaluation = dict(
    type='GenerativeEvalHook',
    interval=dict(milestones=[80000], interval=[10000, 5000]),
    metrics=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True),
    sample_kwargs=dict(sample_model='ema'))

lr_config = None
total_iters = 160000
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_afhqv2_512_b4x8_cvt_official_rgb.py
0 → 100644
View file @
1401de15
_base_ = ['./stylegan3_base.py']

# StyleGAN3-T synthesis settings.
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 32768,
    'channel_max': 512,
    'magnitude_ema_beta': 0.999
}

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        out_size=512,
        img_channels=3,
        rgb2bgr=True,
        synthesis_cfg=synthesis_cfg),
    discriminator=dict(in_size=512))
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_ffhq_1024_b4x8_cvt_official_rgb.py
0 → 100644
View file @
1401de15
_base_ = ['./stylegan3_base.py']

# StyleGAN3-T synthesis settings.
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 32768,
    'channel_max': 512,
    'magnitude_ema_beta': 0.999
}

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        out_size=1024,
        img_channels=3,
        synthesis_cfg=synthesis_cfg,
        rgb2bgr=True),
    discriminator=dict(in_size=1024))
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_ffhqu_256_b4x8_cvt_official_rgb.py
0 → 100644
View file @
1401de15
_base_ = ['./stylegan3_base.py']

# StyleGAN3-T at 256x256: smallest channel_base of the family.
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 16384,
    'channel_max': 512,
    'magnitude_ema_beta': 0.999
}

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(
        out_size=256,
        img_channels=3,
        rgb2bgr=True,
        synthesis_cfg=synthesis_cfg),
    discriminator=dict(in_size=256, channel_multiplier=1))
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_noaug_fp16_gamma2.0_ffhq_256_b4x8.py
0 → 100644
View file @
1401de15
_base_ = [
    '../_base_/models/stylegan/stylegan3_base.py',
    '../_base_/datasets/unconditional_imgs_flip_lanczos_resize_256x256.py',
    '../_base_/default_runtime.py'
]

# StyleGAN3-T synthesis network at 256x256.
synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 16384,
    'channel_max': 512,
    'magnitude_ema_beta': 0.999
}
r1_gamma = 2.  # set by user
d_reg_interval = 16

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(out_size=256, img_channels=3, synthesis_cfg=synthesis_cfg),
    discriminator=dict(in_size=256, channel_multiplier=1),
    gan_loss=dict(type='GANLoss', gan_type='wgan-logistic-ns'),
    # lazy R1 regularization: scale the weight by the regularization interval
    disc_auxiliary_loss=dict(loss_weight=r1_gamma / 2.0 * d_reg_interval))

imgs_root = 'data/ffhq/images'
data = dict(
    samples_per_gpu=4,
    train=dict(dataset=dict(imgs_root=imgs_root)),
    val=dict(imgs_root=imgs_root))

ema_half_life = 10.  # G_smoothing_kimg

custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interp_mode='lerp',
        interval=1,
        start_iter=0,
        # ramp up EMA momentum from 0 as in the official StyleGAN3 impl
        momentum_policy='rampup',
        momentum_cfg=dict(
            ema_kimg=10, ema_rampup=0.05, batch_size=32, eps=1e-8),
        priority='VERY_HIGH')
]

inception_pkl = 'work_dirs/inception_pkl/ffhq-lanczos-256x256.pkl'
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True))

inception_path = None
evaluation = dict(
    type='GenerativeEvalHook',
    interval=10000,
    metrics=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN', inception_path=inception_path),
        bgr2rgb=True),
    sample_kwargs=dict(sample_model='ema'))

checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=30)
lr_config = None
total_iters = 800002
build/lib/mmgen/.mim/configs/styleganv3/stylegan3_t_noaug_fp16_gamma32.8_ffhq_1024_b4x8.py
0 → 100644
View file @
1401de15
_base_ = [
    '../_base_/models/stylegan/stylegan3_base.py',
    '../_base_/datasets/ffhq_flip.py', '../_base_/default_runtime.py'
]

batch_size = 32
# NOTE(review): this computed value is never used below — synthesis_cfg
# hard-codes 0.999 (0.5**(32/20000) is ~0.99889). Verify which was intended.
magnitude_ema_beta = 0.5**(batch_size / (20 * 1e3))

synthesis_cfg = {
    'type': 'SynthesisNetwork',
    'channel_base': 32768,
    'channel_max': 512,
    'magnitude_ema_beta': 0.999
}
r1_gamma = 32.8
d_reg_interval = 16

model = dict(
    type='StaticUnconditionalGAN',
    generator=dict(out_size=1024, img_channels=3, synthesis_cfg=synthesis_cfg),
    discriminator=dict(in_size=1024),
    gan_loss=dict(type='GANLoss', gan_type='wgan-logistic-ns'),
    # lazy R1 regularization: scale the weight by the regularization interval
    disc_auxiliary_loss=dict(loss_weight=r1_gamma / 2.0 * d_reg_interval))

imgs_root = None  # set by user
data = dict(
    samples_per_gpu=4,
    train=dict(dataset=dict(imgs_root=imgs_root)),
    val=dict(imgs_root=imgs_root))

ema_half_life = 10.  # G_smoothing_kimg

custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interp_mode='lerp',
        interval=1,
        start_iter=0,
        momentum_policy='rampup',
        momentum_cfg=dict(
            ema_kimg=10, ema_rampup=0.05, batch_size=batch_size, eps=1e-8),
        priority='VERY_HIGH')
]

inception_pkl = 'work_dirs/inception_pkl/ffhq_noflip_1024x1024.pkl'
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True))

evaluation = dict(
    type='GenerativeEvalHook',
    interval=10000,
    metrics=dict(
        type='FID',
        num_images=50000,
        inception_pkl=inception_pkl,
        inception_args=dict(type='StyleGAN'),
        bgr2rgb=True),
    sample_kwargs=dict(sample_model='ema'))

checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=30)
lr_config = None
total_iters = 800002
build/lib/mmgen/.mim/configs/wgan-gp/metafile.yml
0 → 100644
View file @
1401de15
# Metafile for the WGAN-GP configs (model-index format).
Collections:
- Metadata:
    Architecture:
    - WGAN-GP
  Name: WGAN-GP
  Paper:
  - https://arxiv.org/abs/1704.00028
  README: configs/wgan-gp/README.md
Models:
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/wgan-gp/wgangp_GN_celeba-cropped_128_b64x1_160kiter.py
  In Collection: WGAN-GP
  Metadata:
    Training Data: CELEBA
  Name: wgangp_GN_celeba-cropped_128_b64x1_160kiter
  Results:
  - Dataset: CELEBA
    Metrics:
      Details: GN
      MS-SSIM: 0.2601
      SWD: 5.87, 9.76, 9.43, 18.84/10.97
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/wgangp/wgangp_GN_celeba-cropped_128_b64x1_160k_20210408_170611-f8a99336.pth
- Config: https://github.com/open-mmlab/mmgeneration/tree/master/configs/wgan-gp/wgangp_GN_GP-50_lsun-bedroom_128_b64x1_160kiter.py
  In Collection: WGAN-GP
  Metadata:
    Training Data: LSUN
  Name: wgangp_GN_GP-50_lsun-bedroom_128_b64x1_160kiter
  Results:
  - Dataset: LSUN
    Metrics:
      Details: GN, GP-lambda = 50
      MS-SSIM: 0.059
      SWD: 11.7, 7.87, 9.82, 25.36/13.69
    Task: Unconditional GANs
    Weights: https://download.openmmlab.com/mmgen/wgangp/wgangp_GN_GP-50_lsun-bedroom_128_b64x1_130k_20210408_170509-56f2a37c.pth
build/lib/mmgen/.mim/configs/wgan-gp/wgangp_GN_GP-50_lsun-bedroom_128_b64x1_160kiter.py
0 → 100644
View file @
1401de15
_base_ = ['./wgangp_GN_celeba-cropped_128_b64x1_160kiter.py']

# Override: gradient-penalty weight 50 (GP-lambda=50) and LSUN-bedroom data.
model = dict(
    disc_auxiliary_loss=[
        dict(
            type='GradientPenaltyLoss',
            loss_weight=50,
            norm_mode='HWC',
            data_info=dict(
                discriminator='disc',
                real_data='real_imgs',
                fake_data='fake_imgs'))
    ])

data = dict(
    samples_per_gpu=64, train=dict(imgs_root='./data/lsun/bedroom_train'))
build/lib/mmgen/.mim/configs/wgan-gp/wgangp_GN_celeba-cropped_128_b64x1_160kiter.py
0 → 100644
View file @
1401de15
_base_ = [
    '../_base_/datasets/unconditional_imgs_128x128.py',
    '../_base_/models/wgangp/wgangp_base.py'
]

data = dict(
    samples_per_gpu=64,
    train=dict(imgs_root='./data/celeba-cropped/cropped_images_aligned_png/'))

checkpoint_config = dict(interval=10000, by_epoch=False)
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])

custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=1000)
]

lr_config = None
total_iters = 160000

metrics = dict(
    ms_ssim10k=dict(type='MS_SSIM', num_images=10000),
    swd16k=dict(type='SWD', num_images=16384, image_shape=(3, 128, 128)))
build/lib/mmgen/.mim/model-index.yml
0 → 100644
View file @
1401de15
# Aggregated model-index: one metafile per algorithm family.
Import:
- configs/ada/metafile.yml
- configs/biggan/metafile.yml
- configs/cyclegan/metafile.yml
- configs/dcgan/metafile.yml
- configs/ggan/metafile.yml
- configs/improved_ddpm/metafile.yml
- configs/lsgan/metafile.yml
- configs/pggan/metafile.yml
- configs/pix2pix/metafile.yml
- configs/positional_encoding_in_gans/metafile.yml
- configs/sagan/metafile.yml
- configs/singan/metafile.yml
- configs/sngan_proj/metafile.yml
- configs/styleganv1/metafile.yml
- configs/styleganv2/metafile.yml
- configs/styleganv3/metafile.yml
- configs/wgan-gp/metafile.yml
build/lib/mmgen/.mim/tools/deployment/mmgen2torchserver.py
0 → 100644
View file @
1401de15
# Copyright (c) OpenMMLab. All rights reserved.
from
argparse
import
ArgumentParser
,
Namespace
from
pathlib
import
Path
from
tempfile
import
TemporaryDirectory
import
mmcv
try
:
from
model_archiver.model_packaging
import
package_model
from
model_archiver.model_packaging_utils
import
ModelExportUtils
except
ImportError
:
package_model
=
None
def mmgen2torchserver(config_file: str,
                      checkpoint_file: str,
                      output_folder: str,
                      model_name: str,
                      model_version: str = '1.0',
                      model_type: str = 'unconditional',
                      force: bool = False):
    """Converts MMGeneration model (config + checkpoint) to TorchServe `.mar`.

    Args:
        config_file (str): Path of config file. The config should in
            MMGeneration format.
        checkpoint_file (str): Path of checkpoint. The checkpoint should in
            MMGeneration checkpoint format.
        output_folder (str): Folder where `{model_name}.mar` will be created.
            The file created will be in TorchServe archive format.
        model_name (str): Name of the generated ``'mar'`` file. If not None,
            used for naming the `{model_name}.mar` file that will be created
            under `output_folder`. If None, `{Path(checkpoint_file).stem}`
            will be used.
        model_version (str, optional): Model's version. Defaults to '1.0'.
        model_type (str, optional): Type of the model to be convert. Handler
            named ``{model_type}_handler`` would be used to generate ``mar``
            file. Defaults to 'unconditional'.
        force (bool, optional): If True, existing `{model_name}.mar` will be
            overwritten. Default to False.
    """
    mmcv.mkdir_or_exist(output_folder)

    config = mmcv.Config.fromfile(config_file)

    # Dump the resolved config next to the checkpoint so the archive is
    # self-contained; the temp dir is removed once the archive is packaged.
    with TemporaryDirectory() as tmpdir:
        config.dump(f'{tmpdir}/config.py')

        args = Namespace(
            **{
                'model_file': f'{tmpdir}/config.py',
                'serialized_file': checkpoint_file,
                # handler script lives next to this file
                'handler': f'{Path(__file__).parent}/mmgen_{model_type}_handler.py',
                'model_name': model_name or Path(checkpoint_file).stem,
                'version': model_version,
                'export_path': output_folder,
                'force': force,
                'requirements_file': None,
                'extra_files': None,
                'runtime': 'python',
                'archive_format': 'default'
            })
        manifest = ModelExportUtils.generate_manifest_json(args)
        package_model(args, manifest)
def parse_args():
    """Parse CLI arguments for the MMGeneration -> TorchServe converter."""
    parser = ArgumentParser(
        description='Convert MMGeneration models to TorchServe `.mar` format.')
    parser.add_argument('config', type=str, help='config file path')
    parser.add_argument('checkpoint', type=str, help='checkpoint file path')
    parser.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    # Fix: the original adjacent string literals had no separating spaces,
    # so the rendered help text read "`.mar`file" and "`output_folder`.If".
    parser.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar` '
        'file that will be created under `output_folder`. '
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    parser.add_argument(
        '--model-type',
        type=str,
        default='unconditional',
        help='Which model type and handler to be used.')
    parser.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_args()

    # `package_model` stays None when `torch-model-archiver` is missing
    # (see the guarded import at the top of this file).
    if package_model is None:
        # Fix: the original adjacent string literals had no separating space,
        # producing the message "...is required.Try: pip install...".
        raise ImportError('`torch-model-archiver` is required. '
                          'Try: pip install torch-model-archiver')

    mmgen2torchserver(args.config, args.checkpoint, args.output_folder,
                      args.model_name, args.model_version, args.model_type,
                      args.force)
build/lib/mmgen/.mim/tools/deployment/mmgen_unconditional_handler.py
0 → 100644
View file @
1401de15
# Copyright (c) OpenMMLab. All rights reserved.
import
os
import
numpy
as
np
import
torch
from
ts.torch_handler.base_handler
import
BaseHandler
from
mmgen.apis
import
init_model
class MMGenUnconditionalHandler(BaseHandler):
    """TorchServe handler that samples images from an unconditional
    MMGeneration model."""

    def initialize(self, context):
        """Load the model from the archive described by ``context``."""
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(
            self.map_location + ':' + str(properties.get('gpu_id'))
            if torch.cuda.is_available() else self.map_location)
        self.manifest = context.manifest

        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        # the converter dumps the resolved config as `config.py` in the mar
        self.config_file = os.path.join(model_dir, 'config.py')

        self.model = init_model(self.config_file, checkpoint, self.device)
        self.initialized = True

    def preprocess(self, data, *args, **kwargs):
        """Decode the request payload into a plain dict of strings."""
        data_decode = dict()
        # `data` type is `list[dict]`
        for k, v in data[0].items():
            # decode strings
            if isinstance(v, bytearray):
                data_decode[k] = v.decode()
        return data_decode

    def inference(self, data, *args, **kwargs):
        """Sample one image batch with the requested sample model."""
        sample_model = data['sample_model']
        print(sample_model)
        results = self.model.sample_from_noise(
            None, num_batches=1, sample_model=sample_model, **kwargs)
        return results

    def postprocess(self, data):
        # convert torch tensor to numpy and then convert to bytes
        output_list = []
        for data_ in data:
            data_ = (data_ + 1) / 2  # [-1, 1] -> [0, 1]
            data_ = data_[[2, 1, 0], ...]  # channel flip (BGR<->RGB)
            data_ = data_.clamp_(0, 1)
            data_ = (data_ * 255).permute(1, 2, 0)  # CHW -> HWC
            data_np = data_.detach().cpu().numpy().astype(np.uint8)
            data_byte = data_np.tobytes()
            output_list.append(data_byte)
        return output_list
build/lib/mmgen/.mim/tools/deployment/test_torchserver.py
0 → 100644
View file @
1401de15
# Copyright (c) OpenMMLab. All rights reserved.
from
argparse
import
ArgumentParser
import
numpy
as
np
import
requests
from
PIL
import
Image
def parse_args():
    """Parse CLI arguments for querying a running TorchServe instance."""
    parser = ArgumentParser()
    parser.add_argument('model_name', help='The model name in the server')
    parser.add_argument(
        '--inference-addr',
        default='127.0.0.1:8080',
        help='Address and port of the inference server')
    parser.add_argument(
        '--img-path',
        type=str,
        default='demo.png',
        help='Path to save generated image.')
    parser.add_argument(
        '--img-size',
        type=int,
        default=128,
        help='Size of the output image.')
    parser.add_argument(
        '--sample-model',
        type=str,
        default='ema/orig',
        help='Which model you want to use.')
    args = parser.parse_args()
    return args
def save_results(contents, img_path, img_size):
    """Decode raw RGB byte buffers and save them as one image.

    A single buffer is saved directly; a list of buffers is concatenated
    horizontally into one strip before saving.
    """
    if not isinstance(contents, list):
        Image.frombytes('RGB', (img_size, img_size), contents).save(img_path)
        return

    imgs = []
    for content in contents:
        imgs.append(
            np.array(Image.frombytes('RGB', (img_size, img_size), content)))
    # stack side by side along the width axis
    Image.fromarray(np.concatenate(imgs, axis=1)).save(img_path)
def main(args):
    """Request generated image(s) from the TorchServe endpoint and save them."""
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name

    # 'ema/orig' means: fetch one sample from each model and save both.
    if args.sample_model == 'ema/orig':
        cont_ema = requests.post(url, {'sample_model': 'ema'}).content
        cont_orig = requests.post(url, {'sample_model': 'orig'}).content
        save_results([cont_ema, cont_orig], args.img_path, args.img_size)
        return

    response = requests.post(url, {'sample_model': args.sample_model})
    save_results(response.content, args.img_path, args.img_size)
if __name__ == '__main__':
    # Script entry point: parse CLI args, then query the server.
    args = parse_args()
    main(args)
build/lib/mmgen/.mim/tools/dist_eval.sh
0 → 100644
View file @
1401de15
#!/usr/bin/env bash
# Distributed evaluation launcher.
# Usage: dist_eval.sh CONFIG CHECKPOINT GPUS [extra test.py args...]
# Env overrides: NNODES, NODE_RANK, PORT, MASTER_ADDR.

CONFIG=$1
CHECKPOINT=$2
GPUS=$3
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}

# Put the repo root on PYTHONPATH so `test.py` can import the package.
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch \
    --nnodes=$NNODES \
    --node_rank=$NODE_RANK \
    --master_addr=$MASTER_ADDR \
    --nproc_per_node=$GPUS \
    --master_port=$PORT \
    $(dirname "$0")/test.py \
    $CONFIG \
    $CHECKPOINT \
    --launcher pytorch \
    ${@:4}
Prev
1
…
5
6
7
8
9
10
11
12
13
…
24
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment