Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
mmpretrain
Commits
cbc25585
Commit
cbc25585
authored
Jun 24, 2025
by
limm
Browse files
add mmpretrain/ part
parent
1baf0566
Pipeline
#2801
canceled with stages
Changes
268
Pipelines
1
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
677 additions
and
0 deletions
+677
-0
mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py
mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py
+9
-0
mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py
...in/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py
+40
-0
mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py
...onfigs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py
+85
-0
mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py
...onfigs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py
+83
-0
mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py
...in/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py
+34
-0
mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py
.../configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py
+34
-0
mmpretrain/configs/resnet/resnet18_8xb32_in1k.py
mmpretrain/configs/resnet/resnet18_8xb32_in1k.py
+9
-0
mmpretrain/configs/simclr/simclr_resnet50_16xb256_coslr_200e_in1k.py
...configs/simclr/simclr_resnet50_16xb256_coslr_200e_in1k.py
+58
-0
mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py
mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py
+35
-0
mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py
...n/configs/swin_transformer/swin_base_16xb64_in1k_384px.py
+12
-0
mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py
...etrain/configs/swin_transformer/swin_large_16xb64_in1k.py
+18
-0
mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py
.../configs/swin_transformer/swin_large_16xb64_in1k_384px.py
+18
-0
mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py
...ain/configs/swin_transformer/swin_large_8xb8_cub_384px.py
+49
-0
mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py
...etrain/configs/swin_transformer/swin_small_16xb64_in1k.py
+37
-0
mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py
mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py
+37
-0
mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py
...swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py
+32
-0
mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py
.../swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py
+24
-0
mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py
...sformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py
+26
-0
mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py
...sformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py
+14
-0
mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py
...s/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py
+23
-0
No files found.
Too many changes to show.
To preserve performance only
268 of 268+
files are displayed.
Plain diff
Email patch
mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

# Compose the config entirely from base fragments: dataset, runtime,
# model and schedule are all inherited unchanged.
with read_base():
    from .._base_.datasets.imagenet_bs32_pil_resize import *
    from .._base_.default_runtime import *
    from .._base_.models.mobilenet_v2_1x import *
    from .._base_.schedules.imagenet_bs256_epochstep import *
mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification
from mmengine.config import read_base

with read_base():
    from .._base_.models.mobilenet_v3_small import *
    from .._base_.datasets.imagenet_bs128_mbv3 import *
    from .._base_.default_runtime import *

from mmengine.optim import StepLR
from torch.optim import RMSprop

# model settings: switch the inherited small model to the large variant.
model.merge(
    dict(
        backbone=dict(arch='large'),
        head=dict(in_channels=960, mid_channels=[1280]),
    ))

# schedule settings
optim_wrapper = dict(
    optimizer=dict(
        type=RMSprop,
        lr=0.064,
        alpha=0.9,
        momentum=0.9,
        eps=0.0316,
        weight_decay=1e-5))

# Decay LR by 0.973 every 2 epochs.
param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973)

train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1)
val_cfg = dict()
test_cfg = dict()

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (8 GPUs) x (128 samples per GPU)
auto_scale_lr = dict(base_batch_size=1024)
mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification
from mmengine.config import read_base

with read_base():
    from .._base_.models.mobilenet_v3_small import *
    from .._base_.datasets.imagenet_bs128_mbv3 import *
    from .._base_.default_runtime import *

from mmengine.optim import StepLR
from torch.nn.modules.batchnorm import BatchNorm2d
from torch.optim import RMSprop

# model settings: width-0.5 variant with an explicit BN config.
model.merge(
    dict(
        backbone=dict(
            arch='small_050',
            norm_cfg=dict(type=BatchNorm2d, eps=1e-5, momentum=0.1)),
        head=dict(in_channels=288),
    ))

train_pipeline = [
    dict(type=LoadImageFromFile),
    dict(
        type=RandomResizedCrop,
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type=RandomFlip, prob=0.5, direction='horizontal'),
    dict(
        type=AutoAugment,
        policies='imagenet',
        # pad with the (rounded) BGR mean values
        hparams=dict(pad_val=[round(x) for x in [103.53, 116.28, 123.675]])),
    dict(
        type=RandomErasing,
        erase_prob=0.2,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=[103.53, 116.28, 123.675],
        fill_std=[57.375, 57.12, 58.395]),
    dict(type=PackInputs),
]

test_pipeline = [
    dict(type=LoadImageFromFile),
    dict(
        type=ResizeEdge,
        scale=256,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type=CenterCrop, crop_size=224),
    dict(type=PackInputs),
]

train_dataloader.merge(dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline)))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader

# schedule settings
optim_wrapper = dict(
    optimizer=dict(
        type=RMSprop,
        lr=0.064,
        alpha=0.9,
        momentum=0.9,
        eps=0.0316,
        weight_decay=1e-5))

param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973)

train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=10)
val_cfg = dict()
test_cfg = dict()

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (8 GPUs) x (128 samples per GPU)
auto_scale_lr = dict(base_batch_size=1024)
mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification
from mmengine.config import read_base

with read_base():
    from .._base_.models.mobilenet_v3_small import *
    from .._base_.datasets.imagenet_bs128_mbv3 import *
    from .._base_.default_runtime import *

from mmengine.optim import StepLR
from torch.nn.modules.batchnorm import BatchNorm2d
from torch.optim import RMSprop

# model settings: width-0.75 variant with an explicit BN config.
model.merge(
    dict(
        backbone=dict(
            arch='small_075',
            norm_cfg=dict(type=BatchNorm2d, eps=1e-5, momentum=0.1)),
        head=dict(in_channels=432),
    ))

train_pipeline = [
    dict(type=LoadImageFromFile),
    dict(
        type=RandomResizedCrop,
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type=RandomFlip, prob=0.5, direction='horizontal'),
    dict(
        type=AutoAugment,
        policies='imagenet',
        # pad with the (rounded) BGR mean values
        hparams=dict(pad_val=[round(x) for x in [103.53, 116.28, 123.675]])),
    dict(
        type=RandomErasing,
        erase_prob=0.2,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=[103.53, 116.28, 123.675],
        fill_std=[57.375, 57.12, 58.395]),
    dict(type=PackInputs),
]

test_pipeline = [
    dict(type=LoadImageFromFile),
    dict(
        type=ResizeEdge,
        scale=256,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type=CenterCrop, crop_size=224),
    dict(type=PackInputs),
]

train_dataloader.merge(dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline)))
test_dataloader = val_dataloader

# schedule settings
optim_wrapper = dict(
    optimizer=dict(
        type=RMSprop,
        lr=0.064,
        alpha=0.9,
        momentum=0.9,
        eps=0.0316,
        weight_decay=1e-5))

param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973)

train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=10)
val_cfg = dict()
test_cfg = dict()

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (8 GPUs) x (128 samples per GPU)
auto_scale_lr = dict(base_batch_size=1024)
mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification
from mmengine.config import read_base

with read_base():
    from .._base_.models.mobilenet_v3_small import *
    from .._base_.datasets.imagenet_bs128_mbv3 import *
    from .._base_.default_runtime import *

from mmengine.optim import StepLR
from torch.optim import RMSprop

# schedule settings: the model/dataset come from the bases unchanged,
# only the optimization recipe is defined here.
optim_wrapper = dict(
    optimizer=dict(
        type=RMSprop,
        lr=0.064,
        alpha=0.9,
        momentum=0.9,
        eps=0.0316,
        weight_decay=1e-5))

param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973)

train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1)
val_cfg = dict()
test_cfg = dict()

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (8 GPUs) x (128 samples per GPU)
auto_scale_lr = dict(base_batch_size=1024)
mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.models.mobilenet_v3_small import *
    from .._base_.datasets.cifar10_bs16 import *
    from .._base_.schedules.cifar10_bs128 import *
    from .._base_.default_runtime import *

from mmengine.optim import MultiStepLR

# model settings: replace the inherited ImageNet head (`_delete_=True`)
# with a 10-class stacked linear head for CIFAR-10.
model.merge(
    dict(
        head=dict(
            _delete_=True,
            type=StackedLinearClsHead,
            num_classes=10,
            in_channels=576,
            mid_channels=[1280],
            act_cfg=dict(type=Hardswish),
            loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
            topk=(1, 5))))

# schedule settings: step decay at epochs 120 and 170.
param_scheduler.merge(
    dict(
        type=MultiStepLR,
        by_epoch=True,
        milestones=[120, 170],
        gamma=0.1,
    ))

train_cfg.merge(dict(by_epoch=True, max_epochs=200))
mmpretrain/configs/resnet/resnet18_8xb32_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

# Pure composition config: everything is inherited from the base files.
with read_base():
    from .._base_.datasets.imagenet_bs32 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet18 import *
    from .._base_.schedules.imagenet_bs256 import *
mmpretrain/configs/simclr/simclr_resnet50_16xb256_coslr_200e_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs32_simclr import *
    from .._base_.schedules.imagenet_lars_coslr_200e import *
    from .._base_.default_runtime import *

from mmengine.hooks.checkpoint_hook import CheckpointHook
from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper

from mmpretrain.engine.optimizers.lars import LARS
from mmpretrain.models.backbones.resnet import ResNet
from mmpretrain.models.heads.contrastive_head import ContrastiveHead
from mmpretrain.models.losses.cross_entropy_loss import CrossEntropyLoss
from mmpretrain.models.necks.nonlinear_neck import NonLinearNeck
from mmpretrain.models.selfsup.simclr import SimCLR

# dataset settings: raise the per-GPU batch size from the base's 32 to 256.
train_dataloader.merge(dict(batch_size=256))

# model settings
model = dict(
    type=SimCLR,
    backbone=dict(
        type=ResNet,
        depth=50,
        norm_cfg=dict(type='SyncBN'),
        zero_init_residual=True),
    neck=dict(
        type=NonLinearNeck,  # SimCLR non-linear neck
        in_channels=2048,
        hid_channels=2048,
        out_channels=128,
        num_layers=2,
        with_avg_pool=True),
    head=dict(
        type=ContrastiveHead,
        loss=dict(type=CrossEntropyLoss),
        temperature=0.1),
)

# optimizer: LARS, with norm/bias parameters excluded from both weight
# decay and LARS adaptation.
optim_wrapper = dict(
    type=OptimWrapper,
    optimizer=dict(type=LARS, lr=4.8, momentum=0.9, weight_decay=1e-6),
    paramwise_cfg=dict(
        custom_keys={
            'bn': dict(decay_mult=0, lars_exclude=True),
            'bias': dict(decay_mult=0, lars_exclude=True),
            # bn layer in ResNet block downsample module
            'downsample.1': dict(decay_mult=0, lars_exclude=True)
        }))

# runtime settings
default_hooks.checkpoint = dict(
    type=CheckpointHook, interval=10, max_keep_ckpts=3)
mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
from mmengine.model import ConstantInit, TruncNormalInit

from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_224 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings
model.update(
    backbone=dict(img_size=224, drop_path_rate=0.5, stage_cfgs=None),
    head=dict(
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type=LabelSmoothLoss,
            label_smooth_val=0.1,
            mode='original',
            # FIX: was `loss_weight=0`, which multiplies the classification
            # loss by zero and silently disables training; the standard
            # Swin recipe uses 1.0.
            loss_weight=1.0),
        topk=None,
        cal_acc=False),
    init_cfg=[
        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
    ],
    train_cfg=dict(augments=[
        dict(type=Mixup, alpha=0.8),
        dict(type=CutMix, alpha=1.0)
    ]))

# schedule settings
optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# schedule settings: only add gradient clipping on top of the bases.
optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_224 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings: switch the inherited base model to the large variant.
model.update(
    backbone=dict(arch='large', img_size=224, stage_cfgs=None),
    head=dict(in_channels=1536),
)

# schedule settings
optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings: large variant at 384px (img_size comes from the base).
model.update(
    backbone=dict(arch='large'),
    head=dict(in_channels=1536),
)

# schedule settings
optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
from mmengine.hooks import CheckpointHook, LoggerHook
from mmengine.model import PretrainedInit
from torch.optim.adamw import AdamW

from mmpretrain.models import ImageClassifier

with read_base():
    from .._base_.datasets.cub_bs8_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_base import *
    from .._base_.schedules.cub_bs64 import *

# model settings: fine-tune an IN-21k pretrained Swin-L on CUB (200 classes).
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth'  # noqa
model.update(
    backbone=dict(
        arch='large',
        init_cfg=dict(
            type=PretrainedInit, checkpoint=checkpoint, prefix='backbone')),
    head=dict(num_classes=200, in_channels=1536))

# schedule settings: replace the base optimizer (`_delete_=True`) with a
# low-LR AdamW suitable for fine-tuning.
optim_wrapper = dict(
    optimizer=dict(
        _delete_=True,
        type=AdamW,
        lr=5e-6,
        weight_decay=0.0005,
        eps=1e-8,
        betas=(0.9, 0.999)),
    paramwise_cfg=dict(
        norm_decay_mult=0.0,
        bias_decay_mult=0.0,
        custom_keys={
            '.absolute_pos_embed': dict(decay_mult=0.0),
            '.relative_position_bias_table': dict(decay_mult=0.0)
        }),
    clip_grad=dict(max_norm=5.0),
)

default_hooks = dict(
    # log every 20 intervals
    logger=dict(type=LoggerHook, interval=20),
    # save last three checkpoints
    checkpoint=dict(type=CheckpointHook, interval=1, max_keep_ckpts=3))
mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
from mmengine.model import ConstantInit, TruncNormalInit

from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_224 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings
model.update(
    backbone=dict(
        arch='small', img_size=224, drop_path_rate=0.3, stage_cfgs=None),
    head=dict(
        in_channels=768,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type=LabelSmoothLoss,
            label_smooth_val=0.1,
            mode='original',
            # FIX: was `loss_weight=0`, which multiplies the classification
            # loss by zero and silently disables training; the standard
            # Swin recipe uses 1.0.
            loss_weight=1.0),
        topk=None,
        cal_acc=False),
    init_cfg=[
        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
    ],
    train_cfg=dict(augments=[
        dict(type=Mixup, alpha=0.8),
        dict(type=CutMix, alpha=1.0)
    ]))

# schedule settings
optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
from mmengine.model import ConstantInit, TruncNormalInit

from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_224 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings
model.update(
    backbone=dict(
        arch='tiny', img_size=224, drop_path_rate=0.2, stage_cfgs=None),
    head=dict(
        in_channels=768,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type=LabelSmoothLoss,
            label_smooth_val=0.1,
            mode='original',
            # FIX: was `loss_weight=0`, which multiplies the classification
            # loss by zero and silently disables training; the standard
            # Swin recipe uses 1.0.
            loss_weight=1.0),
        topk=None,
        cal_acc=False),
    init_cfg=[
        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
    ],
    train_cfg=dict(augments=[
        dict(type=Mixup, alpha=0.8),
        dict(type=CutMix, alpha=1.0)
    ]))

# schedule settings
optim_wrapper = dict(clip_grad=dict(max_norm=5.0))
mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
from mmengine.model import ConstantInit, TruncNormalInit

from mmpretrain.models import CutMix, Mixup

with read_base():
    from .._base_.datasets.imagenet21k_bs128 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_v2_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings: 192px pre-training on ImageNet-21k (21841 classes).
model.update(
    backbone=dict(
        img_size=192, drop_path_rate=0.5, window_size=[12, 12, 12, 6]),
    head=dict(num_classes=21841),
    init_cfg=[
        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
    ],
    train_cfg=dict(augments=[
        dict(type=Mixup, alpha=0.8),
        dict(type=CutMix, alpha=1.0)
    ]))

# dataset settings
data_preprocessor = dict(num_classes=21841)

# Patch the inherited pipelines in place for the 192px resolution.
_base_['train_pipeline'][1]['scale'] = 192  # RandomResizedCrop
_base_['test_pipeline'][1]['scale'] = 219  # ResizeEdge
_base_['test_pipeline'][2]['crop_size'] = 192  # CenterCrop
mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
from mmengine.model import ConstantInit, TruncNormalInit

from mmpretrain.models import CutMix, Mixup

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_256 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_v2_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings: 256px input with window size 16 (8 in the last stage).
model.update(
    backbone=dict(
        img_size=256, drop_path_rate=0.5, window_size=[16, 16, 16, 8]),
    init_cfg=[
        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
    ],
    train_cfg=dict(augments=[
        dict(type=Mixup, alpha=0.8),
        dict(type=CutMix, alpha=1.0)
    ]))
mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
from mmengine.model import ConstantInit, TruncNormalInit

from mmpretrain.models import CutMix, Mixup

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_256 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_v2_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings: fine-tune at 256px/window 16 from a model pre-trained
# with window size 12 (hence `pretrained_window_sizes`).
model.update(
    backbone=dict(
        img_size=256,
        window_size=[16, 16, 16, 8],
        pretrained_window_sizes=[12, 12, 12, 6]),
    init_cfg=[
        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
    ],
    train_cfg=dict(augments=[
        dict(type=Mixup, alpha=0.8),
        dict(type=CutMix, alpha=1.0)
    ]))
mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_v2_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings: 384px fine-tuning with window 24, from a checkpoint
# pre-trained with window size 12 (hence `pretrained_window_sizes`).
model.update(
    backbone=dict(
        window_size=[24, 24, 24, 12],
        pretrained_window_sizes=[12, 12, 12, 6]))
mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py
0 → 100644
View file @
cbc25585
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
from mmengine.model import ConstantInit, TruncNormalInit

from mmpretrain.models import CutMix, Mixup

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_256 import *
    from .._base_.default_runtime import *
    from .._base_.models.swin_transformer_v2_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model settings: 256px input; the base's default window size (8) is kept.
model.update(
    backbone=dict(img_size=256, drop_path_rate=0.5),
    init_cfg=[
        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
    ],
    train_cfg=dict(augments=[
        dict(type=Mixup, alpha=0.8),
        dict(type=CutMix, alpha=1.0)
    ]))
Prev
1
2
3
4
5
6
7
8
9
…
14
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment