Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
stylegan2_mmcv
Commits
1401de15
Commit
1401de15
authored
Jun 28, 2024
by
dongchy920
Browse files
stylegan2_mmcv
parents
Pipeline
#1274
canceled with stages
Changes
463
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
869 additions
and
0 deletions
+869
-0
configs/_base_/datasets/cifar10_rgb.py
configs/_base_/datasets/cifar10_rgb.py
+32
-0
configs/_base_/datasets/ffhq_flip.py
configs/_base_/datasets/ffhq_flip.py
+45
-0
configs/_base_/datasets/grow_scale_imgs_128x128.py
configs/_base_/datasets/grow_scale_imgs_128x128.py
+41
-0
configs/_base_/datasets/grow_scale_imgs_celeba-hq.py
configs/_base_/datasets/grow_scale_imgs_celeba-hq.py
+45
-0
configs/_base_/datasets/grow_scale_imgs_ffhq_styleganv1.py
configs/_base_/datasets/grow_scale_imgs_ffhq_styleganv1.py
+43
-0
configs/_base_/datasets/imagenet_128.py
configs/_base_/datasets/imagenet_128.py
+46
-0
configs/_base_/datasets/imagenet_128x128_inception_stat.py
configs/_base_/datasets/imagenet_128x128_inception_stat.py
+50
-0
configs/_base_/datasets/imagenet_256.py
configs/_base_/datasets/imagenet_256.py
+46
-0
configs/_base_/datasets/imagenet_64x64_inception_stat.py
configs/_base_/datasets/imagenet_64x64_inception_stat.py
+50
-0
configs/_base_/datasets/imagenet_noaug_128.py
configs/_base_/datasets/imagenet_noaug_128.py
+46
-0
configs/_base_/datasets/imagenet_noaug_256.py
configs/_base_/datasets/imagenet_noaug_256.py
+46
-0
configs/_base_/datasets/imagenet_noaug_64.py
configs/_base_/datasets/imagenet_noaug_64.py
+46
-0
configs/_base_/datasets/imagenet_rgb.py
configs/_base_/datasets/imagenet_rgb.py
+43
-0
configs/_base_/datasets/lsun-car_pad_512.py
configs/_base_/datasets/lsun-car_pad_512.py
+35
-0
configs/_base_/datasets/lsun_stylegan.py
configs/_base_/datasets/lsun_stylegan.py
+45
-0
configs/_base_/datasets/paired_imgs_256x256.py
configs/_base_/datasets/paired_imgs_256x256.py
+68
-0
configs/_base_/datasets/paired_imgs_256x256_crop.py
configs/_base_/datasets/paired_imgs_256x256_crop.py
+84
-0
configs/_base_/datasets/singan.py
configs/_base_/datasets/singan.py
+12
-0
configs/_base_/datasets/unconditional_imgs_128x128.py
configs/_base_/datasets/unconditional_imgs_128x128.py
+21
-0
configs/_base_/datasets/unconditional_imgs_64x64.py
configs/_base_/datasets/unconditional_imgs_64x64.py
+25
-0
No files found.
Too many changes to show.
To preserve performance only
463 of 463+
files are displayed.
Plain diff
Email patch
configs/_base_/datasets/cifar10_rgb.py
0 → 100644
View file @
1401de15
# CIFAR-10 dataset config (MMClassification-style pipelines).
dataset_type = 'mmcls.CIFAR10'

# Different from mmcls, we adopt the normalization used in BigGAN;
# the pipeline transforms below come from MMClassification.
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=False)

train_pipeline = [
    dict(type='RandomCrop', size=32, padding=4),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

# Different from the classification task, the val/test splits also use the
# training part, which is the same as StyleGAN-ADA.
data = dict(
    samples_per_gpu=None,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_prefix='data/cifar10',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/cifar10',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_prefix='data/cifar10',
        pipeline=test_pipeline))
configs/_base_/datasets/ffhq_flip.py
0 → 100644
View file @
1401de15
# FFHQ dataset config: horizontal-flip augmentation only.
dataset_type = 'UnconditionalImageDataset'

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='real_img',
        io_backend='disk',
    ),
    dict(type='Flip', keys=['real_img'], direction='horizontal'),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5] * 3,
        std=[127.5] * 3,
        to_rgb=False),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]
# NOTE(review): val normalizes with to_rgb=True while train uses to_rgb=False;
# presumably intentional (BGR training, RGB evaluation) — confirm upstream.
val_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='real_img',
        io_backend='disk',
    ),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5] * 3,
        std=[127.5] * 3,
        to_rgb=True),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]

# `samples_per_gpu` and `imgs_root` need to be set.
data = dict(
    samples_per_gpu=None,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=100,
        dataset=dict(
            type=dataset_type, imgs_root=None, pipeline=train_pipeline)),
    val=dict(type=dataset_type, imgs_root=None, pipeline=val_pipeline))
configs/_base_/datasets/grow_scale_imgs_128x128.py
0 → 100644
View file @
1401de15
# Progressive-growing dataset config at a fixed 128x128 target resolution.
dataset_type = 'GrowScaleImgDataset'

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='real_img',
        io_backend='disk',
    ),
    dict(type='Resize', keys=['real_img'], scale=(128, 128)),
    dict(type='Flip', keys=['real_img'], direction='horizontal'),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5] * 3,
        std=[127.5] * 3,
        to_rgb=False),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]

# `samples_per_gpu` and `imgs_root` need to be set.
data = dict(
    # samples per gpu should be the same as the first scale,
    # e.g. '4': 64 in this case
    samples_per_gpu=None,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        # just an example
        imgs_roots={'128': './data/lsun/bedroom_train'},
        pipeline=train_pipeline,
        gpu_samples_base=4,
        # note that this should be changed with total gpu number
        gpu_samples_per_scale={
            '4': 64,
            '8': 32,
            '16': 16,
            '32': 8,
            '64': 4
        },
        len_per_stage=-1))
configs/_base_/datasets/grow_scale_imgs_celeba-hq.py
0 → 100644
View file @
1401de15
# Progressive-growing dataset config for CelebA-HQ (per-scale image roots).
dataset_type = 'GrowScaleImgDataset'

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='real_img',
        io_backend='disk',
    ),
    dict(type='Flip', keys=['real_img'], direction='horizontal'),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5] * 3,
        std=[127.5] * 3,
        to_rgb=False),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]

# `samples_per_gpu` and `imgs_root` need to be set.
data = dict(
    # samples per gpu should be the same as the first scale,
    # e.g. '4': 64 in this case
    samples_per_gpu=None,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        # just an example
        imgs_roots={
            '64': './data/celebahq/imgs_64',
            '256': './data/celebahq/imgs_256',
            '512': './data/celebahq/imgs_512',
            '1024': './data/celebahq/imgs_1024'
        },
        pipeline=train_pipeline,
        gpu_samples_base=4,
        # note that this should be changed with total gpu number
        gpu_samples_per_scale={
            '4': 64,
            '8': 32,
            '16': 16,
            '32': 8,
            '64': 4
        },
        len_per_stage=300000))
configs/_base_/datasets/grow_scale_imgs_ffhq_styleganv1.py
0 → 100644
View file @
1401de15
# Progressive-growing dataset config for FFHQ (StyleGANv1-style schedule).
dataset_type = 'GrowScaleImgDataset'

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='real_img',
        io_backend='disk',
    ),
    dict(type='Flip', keys=['real_img'], direction='horizontal'),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5, 127.5, 127.5],
        std=[127.5, 127.5, 127.5],
        to_rgb=False),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]

data = dict(
    samples_per_gpu=64,
    workers_per_gpu=4,
    train=dict(
        type='GrowScaleImgDataset',
        imgs_roots={
            '1024': './data/ffhq/images',
            '256': './data/ffhq/ffhq_imgs/ffhq_256',
            '64': './data/ffhq/ffhq_imgs/ffhq_64'
        },
        pipeline=train_pipeline,
        gpu_samples_base=4,
        gpu_samples_per_scale={
            '4': 64,
            '8': 32,
            '16': 16,
            '32': 8,
            '64': 4,
            '128': 4,
            '256': 4,
            '512': 4,
            '1024': 4
        },
        len_per_stage=300000))
configs/_base_/datasets/imagenet_128.py
0 → 100644
View file @
1401de15
# dataset settings: ImageNet at 128x128 for conditional GAN training.
dataset_type = 'mmcls.ImageNet'

# Different from mmcls, we adopt the setting used in BigGAN:
# `RandomCropLongEdge` for training and `CenterCropLongEdge` for testing.
# Importantly, `to_rgb` is `False` so images keep the BGR channel order.
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=False)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomCropLongEdge', keys=['img']),
    dict(type='Resize', size=(128, 128), backend='pillow'),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(128, 128), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=None,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/imagenet_128x128_inception_stat.py
0 → 100644
View file @
1401de15
# dataset settings: extract Inception statistics of ImageNet at 128x128.
dataset_type = 'mmcls.ImageNet'

# Following the BigGAN pipeline, images are center-cropped and resized to
# 128x128 before being fed to the Inception network. See:
# https://github.com/ajbrock/BigGAN-PyTorch/blob/master/scripts/utils/prepare_data.sh
# https://github.com/ajbrock/BigGAN-PyTorch/blob/master/make_hdf5.py
# https://github.com/ajbrock/BigGAN-PyTorch/blob/master/calculate_inception_moments.py # noqa
# Importantly, `to_rgb` is `True` here to convert the channel order to RGB.
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(128, 128), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(128, 128), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=None,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/imagenet_256.py
0 → 100644
View file @
1401de15
# dataset settings: ImageNet at 256x256 for conditional GAN training.
dataset_type = 'mmcls.ImageNet'

# Different from mmcls, we adopt the setting used in BigGAN:
# `RandomCropLongEdge` for training and `CenterCropLongEdge` for testing.
# Importantly, `to_rgb` is `False` so images keep the BGR channel order.
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=False)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomCropLongEdge', keys=['img']),
    dict(type='Resize', size=(256, 256), backend='pillow'),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(256, 256), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=None,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/imagenet_64x64_inception_stat.py
0 → 100644
View file @
1401de15
# dataset settings: extract Inception statistics of ImageNet at 64x64.
dataset_type = 'mmcls.ImageNet'

# Following the BigGAN pipeline, images are center-cropped and resized to
# 64x64 before being fed to the Inception network. See:
# https://github.com/ajbrock/BigGAN-PyTorch/blob/master/scripts/utils/prepare_data.sh
# https://github.com/ajbrock/BigGAN-PyTorch/blob/master/make_hdf5.py
# https://github.com/ajbrock/BigGAN-PyTorch/blob/master/calculate_inception_moments.py # noqa
# Importantly, `to_rgb` is `True` here to convert the channel order to RGB.
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(64, 64), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
# BUGFIX: the test pipeline previously resized to (128, 128), left over from a
# copy of the 128x128 stat config; this is the 64x64 config and must match the
# train pipeline, so the test resolution is corrected to (64, 64).
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(64, 64), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=None,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/imagenet_noaug_128.py
0 → 100644
View file @
1401de15
# dataset settings: ImageNet at 128x128 with all randomness removed.
dataset_type = 'mmcls.ImageNet'

# Different from mmcls, we adopt the setting used in BigGAN.
# Importantly, `to_rgb` is `False` so images keep the BGR channel order.
# `RandomFlip` is removed and `RandomCropLongEdge` replaced by
# `CenterCropLongEdge` to eliminate randomness.
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=False)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(128, 128), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(128, 128), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=None,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/imagenet_noaug_256.py
0 → 100644
View file @
1401de15
# dataset settings: ImageNet at 256x256 with all randomness removed.
dataset_type = 'mmcls.ImageNet'

# Different from mmcls, we adopt the setting used in BigGAN.
# Importantly, `to_rgb` is `False` so images keep the BGR channel order.
# `RandomFlip` is removed and `RandomCropLongEdge` replaced by
# `CenterCropLongEdge` to eliminate randomness.
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=False)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(256, 256), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
# BUGFIX: the test pipeline previously resized to (128, 128), left over from a
# copy of the 128 config; this is the 256 config and the test resolution must
# match the train pipeline, so it is corrected to (256, 256).
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(256, 256), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=None,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/imagenet_noaug_64.py
0 → 100644
View file @
1401de15
# dataset settings: ImageNet at 64x64 with all randomness removed.
dataset_type = 'mmcls.ImageNet'

# Different from mmcls, we adopt the setting used in BigGAN.
# Importantly, `to_rgb` is `False` so images keep the BGR channel order.
# `RandomFlip` is removed and `RandomCropLongEdge` replaced by
# `CenterCropLongEdge` to eliminate randomness.
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=False)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(64, 64), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='CenterCropLongEdge', keys=['img']),
    dict(type='Resize', size=(64, 64), backend='pillow'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=None,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/imagenet_rgb.py
0 → 100644
View file @
1401de15
# dataset settings: standard MMClassification-style ImageNet pipeline (RGB).
dataset_type = 'mmcls.ImageNet'

# Note that the pipelines below are from MMClassification.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', size=224, backend='pillow'),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    # (256, -1): resize the short edge to 256, keep aspect ratio
    dict(type='Resize', size=(256, -1), backend='pillow'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/lsun-car_pad_512.py
0 → 100644
View file @
1401de15
# LSUN-Car dataset config: resize to 512x384, then zero-pad height to 512.
dataset_type = 'UnconditionalImageDataset'

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='real_img',
        io_backend='disk',
    ),
    dict(type='Resize', keys=['real_img'], scale=(512, 384)),
    # pad 64 rows on top and bottom (H, W, C layout) to reach 512x512
    dict(
        type='NumpyPad',
        keys=['real_img'],
        padding=((64, 64), (0, 0), (0, 0)),
    ),
    dict(type='Flip', keys=['real_img'], direction='horizontal'),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5] * 3,
        std=[127.5] * 3,
        to_rgb=False),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]

# `samples_per_gpu` and `imgs_root` need to be set.
data = dict(
    samples_per_gpu=None,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type, imgs_root=None, pipeline=train_pipeline)),
    # val reuses the training pipeline (flip included) as in the original
    val=dict(type=dataset_type, imgs_root=None, pipeline=train_pipeline))
configs/_base_/datasets/lsun_stylegan.py
0 → 100644
View file @
1401de15
# Style-based GANs do not perform any augmentation for the LSUN datasets.
dataset_type = 'UnconditionalImageDataset'

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='real_img',
        io_backend='disk',
    ),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5] * 3,
        std=[127.5] * 3,
        to_rgb=False),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]
# NOTE(review): val uses to_rgb=True while train uses to_rgb=False;
# presumably intentional (BGR training, RGB evaluation) — confirm upstream.
val_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='real_img',
        io_backend='disk',
    ),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5] * 3,
        std=[127.5] * 3,
        to_rgb=True),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]

# `samples_per_gpu` and `imgs_root` need to be set.
data = dict(
    samples_per_gpu=None,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=100,
        dataset=dict(
            type=dataset_type, imgs_root=None, pipeline=train_pipeline)),
    val=dict(type=dataset_type, imgs_root=None, pipeline=val_pipeline))
configs/_base_/datasets/paired_imgs_256x256.py
0 → 100644
View file @
1401de15
# dataset settings: paired image-to-image translation at 256x256.
train_dataset_type = 'PairedImageDataset'
val_dataset_type = 'PairedImageDataset'

img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

train_pipeline = [
    dict(
        type='LoadPairedImageFromFile',
        io_backend='disk',
        key='pair',
        flag='color'),
    dict(
        type='Resize',
        keys=['img_a', 'img_b'],
        scale=(256, 256),
        interpolation='bicubic'),
    dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']),
    dict(
        type='Normalize',
        keys=['img_a', 'img_b'],
        to_rgb=True,
        **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img_a', 'img_b']),
    dict(
        type='Collect',
        keys=['img_a', 'img_b'],
        meta_keys=['img_a_path', 'img_b_path'])
]
# NOTE(review): test normalizes with to_rgb=False while train uses
# to_rgb=True — looks asymmetric; confirm against the upstream config.
test_pipeline = [
    dict(
        type='LoadPairedImageFromFile',
        io_backend='disk',
        key='pair',
        flag='color'),
    dict(
        type='Resize',
        keys=['img_a', 'img_b'],
        scale=(256, 256),
        interpolation='bicubic'),
    dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']),
    dict(
        type='Normalize',
        keys=['img_a', 'img_b'],
        to_rgb=False,
        **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img_a', 'img_b']),
    dict(
        type='Collect',
        keys=['img_a', 'img_b'],
        meta_keys=['img_a_path', 'img_b_path'])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    drop_last=True,
    train=dict(
        type=train_dataset_type,
        dataroot=None,
        pipeline=train_pipeline,
        test_mode=False),
    val=dict(
        type=val_dataset_type,
        dataroot=None,
        pipeline=test_pipeline,
        test_mode=True),
    test=dict(
        type=val_dataset_type,
        dataroot=None,
        pipeline=test_pipeline,
        test_mode=True))
configs/_base_/datasets/paired_imgs_256x256_crop.py
0 → 100644
View file @
1401de15
# dataset settings: paired image-to-image translation with 286->256 crop aug.
train_dataset_type = 'PairedImageDataset'
val_dataset_type = 'PairedImageDataset'

img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

# domain names; per-domain image keys are f'img_{domain}'
domain_a = 'photo'
domain_b = 'mask'

train_pipeline = [
    dict(
        type='LoadPairedImageFromFile',
        io_backend='disk',
        key='pair',
        domain_a=domain_a,
        domain_b=domain_b,
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(286, 286),
        interpolation='bicubic'),
    dict(
        type='FixedCrop',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        crop_size=(256, 256)),
    dict(
        type='Flip',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        direction='horizontal'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        **img_norm_cfg),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]
test_pipeline = [
    dict(
        type='LoadPairedImageFromFile',
        io_backend='disk',
        # NOTE(review): train uses key='pair' but test uses key='image' —
        # looks inconsistent; confirm against the upstream config.
        key='image',
        domain_a=domain_a,
        domain_b=domain_b,
        flag='color'),
    dict(
        type='Resize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        scale=(256, 256),
        interpolation='bicubic'),
    dict(type='RescaleToZeroOne', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Normalize',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        to_rgb=False,
        **img_norm_cfg),
    dict(type='ImageToTensor', keys=[f'img_{domain_a}', f'img_{domain_b}']),
    dict(
        type='Collect',
        keys=[f'img_{domain_a}', f'img_{domain_b}'],
        meta_keys=[f'img_{domain_a}_path', f'img_{domain_b}_path'])
]

data = dict(
    samples_per_gpu=1,
    workers_per_gpu=4,
    drop_last=True,
    train=dict(
        type=train_dataset_type,
        dataroot=None,
        pipeline=train_pipeline,
        test_mode=False),
    val=dict(
        type=val_dataset_type,
        dataroot=None,
        pipeline=test_pipeline,
        test_mode=True),
    test=dict(
        type=val_dataset_type,
        dataroot=None,
        pipeline=test_pipeline,
        test_mode=True))
configs/_base_/datasets/singan.py
0 → 100644
View file @
1401de15
# SinGAN dataset config: trains on a single image pyramid.
dataset_type = 'SinGANDataset'

data = dict(
    samples_per_gpu=1,
    workers_per_gpu=4,
    drop_last=False,
    train=dict(
        type=dataset_type,
        img_path=None,  # need to set
        min_size=25,
        max_size=250,
        scale_factor_init=0.75))
configs/_base_/datasets/unconditional_imgs_128x128.py
0 → 100644
View file @
1401de15
# Unconditional image dataset config at 128x128.
dataset_type = 'UnconditionalImageDataset'

train_pipeline = [
    dict(type='LoadImageFromFile', key='real_img', io_backend='disk'),
    dict(type='Resize', keys=['real_img'], scale=(128, 128)),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5] * 3,
        std=[127.5] * 3,
        to_rgb=False),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]

# `samples_per_gpu` and `imgs_root` need to be set.
data = dict(
    samples_per_gpu=None,
    workers_per_gpu=4,
    train=dict(type=dataset_type, imgs_root=None, pipeline=train_pipeline),
    # val reuses the training pipeline as in the original config
    val=dict(type=dataset_type, imgs_root=None, pipeline=train_pipeline))
configs/_base_/datasets/unconditional_imgs_64x64.py
0 → 100644
View file @
1401de15
# Unconditional image dataset config at 64x64.
dataset_type = 'UnconditionalImageDataset'

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        key='real_img',
        io_backend='disk',
    ),
    dict(type='Resize', keys=['real_img'], scale=(64, 64)),
    dict(
        type='Normalize',
        keys=['real_img'],
        mean=[127.5] * 3,
        std=[127.5] * 3,
        to_rgb=False),
    dict(type='ImageToTensor', keys=['real_img']),
    dict(type='Collect', keys=['real_img'], meta_keys=['real_img_path'])
]

# `samples_per_gpu` and `imgs_root` need to be set.
data = dict(
    samples_per_gpu=None,
    workers_per_gpu=4,
    train=dict(type=dataset_type, imgs_root=None, pipeline=train_pipeline),
    # val reuses the training pipeline as in the original config
    val=dict(type=dataset_type, imgs_root=None, pipeline=train_pipeline))
Prev
1
…
14
15
16
17
18
19
20
21
22
…
24
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment