"tools/git@developer.sourcefind.cn:gaoqiong/migraphx.git" did not exist on "ff3bd8e690f9ed8e108970f1085d74649dc4226f"
Commit 0fd8347d authored by unknown

Add mmclassification-0.24.1 code, remove mmclassification-speed-benchmark

parent cc567e9e
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='EfficientNet', arch='b7'),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=2560,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='EfficientNet', arch='b8'),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=2816,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
# model settings
model = dict(
    type='ImageClassifier',
    # `em` means EfficientNet-EdgeTPU-M arch
    backbone=dict(type='EfficientNet', arch='em', act_cfg=dict(type='ReLU')),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=1280,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
# model settings
model = dict(
    type='ImageClassifier',
    # `es` means EfficientNet-EdgeTPU-S arch
    backbone=dict(type='EfficientNet', arch='es', act_cfg=dict(type='ReLU')),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=1280,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
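The four EfficientNet configs above are `_base_` model definitions; mmcls builds the actual network from the `model` dict through its registry. A minimal smoke-test sketch, assuming mmcls 0.24.1 and PyTorch are installed and the B7 file lives at the usual `configs/_base_/models/efficientnet_b7.py` path:

import torch
from mmcv import Config
from mmcls.models import build_classifier

# Path is an assumption based on the standard mmcls config layout.
cfg = Config.fromfile('configs/_base_/models/efficientnet_b7.py')
model = build_classifier(cfg.model)
model.init_weights()
model.eval()

with torch.no_grad():
    # GlobalAveragePooling makes the pooled feature size independent of the
    # input resolution; for arch='b7' the channel count should match the
    # head's in_channels=2560.
    feats = model.extract_feat(torch.randn(1, 3, 224, 224))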
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HorNet', arch='base-gf', drop_path_rate=0.5),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=1024,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        cal_acc=False),
    init_cfg=[
        dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
        dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
        dict(type='Constant', layer=['LayerScale'], val=1e-6)
    ],
    train_cfg=dict(augments=[
        dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
        dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
    ]))
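In the HorNet configs, `train_cfg.augments` lists BatchMixup and BatchCutMix with `prob=0.5` each, so the probabilities sum to 1 and every training batch is transformed by exactly one of the two; the head then trains on soft labels, which is why label smoothing uses `mode='original'` and `cal_acc=False` disables in-batch accuracy. An illustrative sketch of the mixup idea only, not the mmcls implementation:

import torch
import torch.nn.functional as F

def batch_mixup(imgs, labels, num_classes, alpha=0.8):
    # Mix the batch with a shuffled copy of itself; lam ~ Beta(alpha, alpha).
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    index = torch.randperm(imgs.size(0))
    one_hot = F.one_hot(labels, num_classes).float()
    mixed_imgs = lam * imgs + (1 - lam) * imgs[index]
    mixed_labels = lam * one_hot + (1 - lam) * one_hot[index]
    return mixed_imgs, mixed_labels

imgs, labels = torch.randn(8, 3, 224, 224), torch.randint(0, 1000, (8,))
mixed_imgs, mixed_labels = batch_mixup(imgs, labels, num_classes=1000)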
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HorNet', arch='base', drop_path_rate=0.5),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=1024,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        cal_acc=False),
    init_cfg=[
        dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
        dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
        dict(type='Constant', layer=['LayerScale'], val=1e-6)
    ],
    train_cfg=dict(augments=[
        dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
        dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
    ]))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HorNet', arch='large-gf', drop_path_rate=0.2),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=1536,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        cal_acc=False),
    init_cfg=[
        dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
        dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
        dict(type='Constant', layer=['LayerScale'], val=1e-6)
    ],
    train_cfg=dict(augments=[
        dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
        dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
    ]))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HorNet', arch='large-gf384', drop_path_rate=0.4),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=1536,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        cal_acc=False),
    init_cfg=[
        dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
        dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
        dict(type='Constant', layer=['LayerScale'], val=1e-6)
    ])
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HorNet', arch='large', drop_path_rate=0.2),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=1536,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        cal_acc=False),
    init_cfg=[
        dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
        dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
        dict(type='Constant', layer=['LayerScale'], val=1e-6)
    ],
    train_cfg=dict(augments=[
        dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
        dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
    ]))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HorNet', arch='small-gf', drop_path_rate=0.4),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=768,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        cal_acc=False),
    init_cfg=[
        dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
        dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
        dict(type='Constant', layer=['LayerScale'], val=1e-6)
    ],
    train_cfg=dict(augments=[
        dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
        dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
    ]))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HorNet', arch='small', drop_path_rate=0.4),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=768,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        cal_acc=False),
    init_cfg=[
        dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
        dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
        dict(type='Constant', layer=['LayerScale'], val=1e-6)
    ],
    train_cfg=dict(augments=[
        dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
        dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
    ]))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HorNet', arch='tiny-gf', drop_path_rate=0.2),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=512,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        cal_acc=False),
    init_cfg=[
        dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
        dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
        dict(type='Constant', layer=['LayerScale'], val=1e-6)
    ],
    train_cfg=dict(augments=[
        dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
        dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
    ]))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HorNet', arch='tiny', drop_path_rate=0.2),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=512,
        init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
        loss=dict(
            type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
        cal_acc=False),
    init_cfg=[
        dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
        dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
        dict(type='Constant', layer=['LayerScale'], val=1e-6)
    ],
    train_cfg=dict(augments=[
        dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
        dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
    ]))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HRNet', arch='w18'),
    neck=[
        dict(type='HRFuseScales', in_channels=(18, 36, 72, 144)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
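This and the six HRNet configs below all set `in_channels=2048` in the head even though the branch widths differ: `HRFuseScales` fuses the four multi-resolution branch outputs (18/36/72/144 channels for w18) into a single 2048-channel map, and because `neck` is a list the modules run in order, so `GlobalAveragePooling` then yields a 2048-d vector for the linear head. A rough stand-in sketch of that path, not the mmcls implementation:

import torch
import torch.nn as nn

widths = (18, 36, 72, 144)  # channel widths of the four w18 branches
maps = [torch.randn(1, c, 56 // 2**i, 56 // 2**i) for i, c in enumerate(widths)]

# Stand-in for HRFuseScales: bring every branch to the coarsest resolution,
# concatenate, and project to 2048 channels with a 1x1 conv.
fused = torch.cat(
    [nn.functional.adaptive_avg_pool2d(m, maps[-1].shape[-2:]) for m in maps],
    dim=1)
to_2048 = nn.Conv2d(sum(widths), 2048, kernel_size=1)
pooled = to_2048(fused).mean(dim=(2, 3))  # GlobalAveragePooling equivalent
print(pooled.shape)  # torch.Size([1, 2048])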
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HRNet', arch='w30'),
    neck=[
        dict(type='HRFuseScales', in_channels=(30, 60, 120, 240)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HRNet', arch='w32'),
    neck=[
        dict(type='HRFuseScales', in_channels=(32, 64, 128, 256)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HRNet', arch='w40'),
    neck=[
        dict(type='HRFuseScales', in_channels=(40, 80, 160, 320)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HRNet', arch='w44'),
    neck=[
        dict(type='HRFuseScales', in_channels=(44, 88, 176, 352)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HRNet', arch='w48'),
    neck=[
        dict(type='HRFuseScales', in_channels=(48, 96, 192, 384)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HRNet', arch='w64'),
    neck=[
        dict(type='HRFuseScales', in_channels=(64, 128, 256, 512)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
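These model files are meant to be composed, not run on their own: a full training config pulls one of them in through `_base_` inheritance together with dataset, schedule, and runtime bases. A hypothetical top-level config for HRNet-W18, with base file names assumed from the usual mmcls 0.x configs/ layout:

# Hypothetical composition; the paths below are assumptions, not taken from
# this commit.
_base_ = [
    '../_base_/models/hrnet/hrnet-w18.py',
    '../_base_/datasets/imagenet_bs32.py',
    '../_base_/schedules/imagenet_bs256.py',
    '../_base_/default_runtime.py',
]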