Commit 12c02d09 authored by renzhc

Updated densenet121, efficientnet-b2, seresnet50 and shufflenet-v2 configs

parent 64c15d70
# dataset settings
dataset_type = 'CustomDataset'
data_preprocessor = dict(
num_classes=200,
# RGB format normalization parameters
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
# convert image from BGR to RGB
to_rgb=True,
)
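# Illustrative note (not part of the original config): after the BGR->RGB
# conversion each channel is normalized as (pixel - mean) / std, so e.g. a
# red-channel value of 255 maps to (255 - 123.675) / 58.395 ~= 2.25.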
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomResizedCrop', scale=224),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='PackInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='ResizeEdge', scale=256, edge='short'),
dict(type='CenterCrop', crop_size=224),
dict(type='PackInputs'),
]
train_dataloader = dict(
batch_size=64,
num_workers=5,
dataset=dict(
type=dataset_type,
data_root='data/imagenet',
data_prefix='train',
pipeline=train_pipeline),
sampler=dict(type='DefaultSampler', shuffle=True),
)
val_dataloader = dict(
batch_size=64,
num_workers=5,
dataset=dict(
type=dataset_type,
data_root='data/imagenet',
data_prefix='val',
pipeline=test_pipeline),
sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))
# If you want a standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
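# A hypothetical sketch of a standalone test split (the 'test' prefix and the
# rest of the layout are assumptions; adjust them to your dataset):
# test_dataloader = dict(
#     batch_size=64,
#     num_workers=5,
#     dataset=dict(
#         type=dataset_type,
#         data_root='data/imagenet',
#         data_prefix='test',
#         pipeline=test_pipeline),
#     sampler=dict(type='DefaultSampler', shuffle=False),
# )
# test_evaluator = val_evaluator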
# dataset settings
dataset_type = 'CustomDataset'
data_preprocessor = dict(
num_classes=200,
# RGB format normalization parameters
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
# convert image from BGR to RGB
to_rgb=True,
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomResizedCrop', scale=224, backend='pillow'),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='PackInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'),
dict(type='CenterCrop', crop_size=224),
dict(type='PackInputs'),
]
train_dataloader = dict(
batch_size=64,
num_workers=5,
dataset=dict(
type=dataset_type,
data_root='data/imagenet',
data_prefix='train',
pipeline=train_pipeline),
sampler=dict(type='DefaultSampler', shuffle=True),
)
val_dataloader = dict(
batch_size=64,
num_workers=5,
dataset=dict(
type=dataset_type,
data_root='data/imagenet',
data_prefix='val',
pipeline=test_pipeline),
sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))
# If you want a standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
# Model settings
model = dict(
type='ImageClassifier',
backbone=dict(type='DenseNet', arch='121'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=200,
in_channels=1024,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
))
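# Sanity check on in_channels (illustrative, assuming the standard DenseNet-121
# layout): growth rate 32, blocks of 6/12/24/16 layers and channel-halving
# transitions give (((64 + 6*32)/2 + 12*32)/2 + 24*32)/2 + 16*32 = 1024.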
# model settings
model = dict(
type='ImageClassifier',
backbone=dict(type='EfficientNet', arch='b2'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=200,
in_channels=1408,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
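# Sanity check on in_channels (illustrative): EfficientNet-B2 widens the
# 1280-channel head of B0 by a factor of 1.1, i.e. 1280 * 1.1 = 1408.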
# model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='SEResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=200,
in_channels=2048,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
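# Sanity check on in_channels (illustrative): SEResNet-50 uses bottleneck
# blocks with expansion 4, so the last stage outputs 512 * 4 = 2048 channels.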
# model settings
model = dict(
type='ImageClassifier',
backbone=dict(type='ShuffleNetV2', widen_factor=1.0),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=200,
in_channels=1024,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
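# Sanity check on in_channels (illustrative): ShuffleNetV2 with widen_factor=1.0
# ends with a 1024-channel conv5 layer, which is what the linear head consumes.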
_base_ = [
'configs/_base_/models/densenet/tiny_densenet121.py',
'configs/_base_/datasets/tiny_imagenet_bs64.py',
'configs/_base_/schedules/imagenet_bs256.py',
'configs/_base_/default_runtime.py',
]
# dataset settings
train_dataloader = dict(batch_size=256)
# schedule settings
train_cfg = dict(by_epoch=True, max_epochs=90)
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (4 GPUs) x (256 samples per GPU)
auto_scale_lr = dict(base_batch_size=1024)
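# Illustrative arithmetic (not part of the original config): with
# base_batch_size=1024 and a base LR of, say, 0.1, training on 2 GPUs x 256
# samples (total 512) would scale the LR linearly to 0.1 * 512 / 1024 = 0.05.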
_base_ = [
'configs/_base_/models/tiny_efficientnet_b2.py',
'configs/_base_/datasets/tiny_imagenet_bs32.py',
'configs/_base_/schedules/imagenet_bs256.py',
'configs/_base_/default_runtime.py',
]
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='EfficientNetRandomCrop', scale=260),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='PackInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='EfficientNetCenterCrop', crop_size=260),
dict(type='PackInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
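# Illustrative note: 260 is EfficientNet-B2's native input resolution under the
# compound-scaling rule, which is why both the train crop and the test crop use
# scale/crop_size=260 here.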
_base_ = [
'configs/_base_/models/tiny_seresnet50.py',
'configs/_base_/datasets/tiny_imagenet_bs32_pil_resize.py',
'configs/_base_/schedules/imagenet_bs256_140e.py',
'configs/_base_/default_runtime.py'
]
_base_ = [
'configs/_base_/models/tiny_shufflenet_v2_1x.py',
'configs/_base_/datasets/tiny_imagenet_bs64_pil_resize.py',
'configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py',
'configs/_base_/default_runtime.py'
]
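# To sanity-check how the `_base_` files above merge, the final config can be
# loaded with mmengine (a minimal sketch; the config path below is an
# assumption about where this file lives in the repo):
from mmengine.config import Config

cfg = Config.fromfile('configs/shufflenet_v2/tiny_shufflenet_v2_1x.py')
print(cfg.model.backbone.type)            # -> 'ShuffleNetV2'
print(cfg.train_dataloader.batch_size)    # inherited from the base dataset config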