"privacy/utils.py" did not exist on "54a48a1a0c412cc7b8ef75881243406b72dc0857"
Commit b12850fe authored by dengjb's avatar dengjb
Browse files

update codes

parent 6515fb96
Pipeline #1046 failed with stages
in 0 seconds
# Zero-shot concat-DOD evaluation with a Swin-B backbone: inherit the Swin-T
# config and override only the backbone/neck to Swin-Base dimensions.
_base_ = 'grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py'
model = dict(
    type='GroundingDINO',
    backbone=dict(
        # Swin-B settings: 384x384 pretraining resolution, wider embedding
        # (128 vs 96 for Swin-T) and a deeper third stage (18 blocks).
        pretrain_img_size=384,
        embed_dims=128,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
        window_size=12,
        drop_path_rate=0.3,
        patch_norm=True),
    # Output channels of the last three Swin-B stages (128 * 2, * 4, * 8),
    # fed to the neck in place of the Swin-T channel widths.
    neck=dict(in_channels=[256, 512, 1024]),
)
# Same as the Swin-B zero-shot concat-DOD config, but with chunked_size=1 at
# test time — presumably evaluates one text chunk per forward pass to reduce
# peak memory (slower); confirm against GroundingDINO's test_cfg handling.
_base_ = 'grounding_dino_swin-b_pretrain_zeroshot_concat_dod.py'
model = dict(test_cfg=dict(chunked_size=1))
# Zero-shot evaluation of Grounding DINO (Swin-T) on the D3 (Described Object
# Detection) benchmark. The FULL / PRES / ABS annotation splits are evaluated
# in one pass: all three datasets are concatenated into a single dataloader
# and scored by a MultiDatasetsEvaluator with one DODCocoMetric per split.
_base_ = '../grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py'
data_root = 'data/d3/'
test_pipeline = [
    dict(
        type='LoadImageFromFile', backend_args=None,
        imdecode_backend='pillow'),
    dict(
        # Resize toward scale=(800, 1333) while preserving aspect ratio.
        type='FixScaleResize',
        scale=(800, 1333),
        keep_ratio=True,
        backend='pillow'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        # 'text' / 'sent_ids' carry the object descriptions used as prompts.
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'text', 'custom_entities', 'sent_ids'))
]
# ---------------- FULL split: all descriptions ----------------#
val_dataset_full = dict(
    type='DODDataset',
    data_root=data_root,
    ann_file='d3_json/d3_full_annotations.json',
    data_prefix=dict(img='d3_images/', anno='d3_pkl'),
    pipeline=test_pipeline,
    test_mode=True,
    backend_args=None,
    return_classes=True)
val_evaluator_full = dict(
    type='DODCocoMetric',
    ann_file=data_root + 'd3_json/d3_full_annotations.json')
# ---------------- PRES split ----------------#
# NOTE(review): per the D3 benchmark these appear to be the presence /
# absence description subsets — confirm against the dataset release.
val_dataset_pres = dict(
    type='DODDataset',
    data_root=data_root,
    ann_file='d3_json/d3_pres_annotations.json',
    data_prefix=dict(img='d3_images/', anno='d3_pkl'),
    pipeline=test_pipeline,
    test_mode=True,
    backend_args=None,
    return_classes=True)
val_evaluator_pres = dict(
    type='DODCocoMetric',
    ann_file=data_root + 'd3_json/d3_pres_annotations.json')
# ---------------- ABS split ----------------#
val_dataset_abs = dict(
    type='DODDataset',
    data_root=data_root,
    ann_file='d3_json/d3_abs_annotations.json',
    data_prefix=dict(img='d3_images/', anno='d3_pkl'),
    pipeline=test_pipeline,
    test_mode=True,
    backend_args=None,
    return_classes=True)
val_evaluator_abs = dict(
    type='DODCocoMetric',
    ann_file=data_root + 'd3_json/d3_abs_annotations.json')
# -------- Wire the three splits into one loader/evaluator pair --------#
datasets = [val_dataset_full, val_dataset_pres, val_dataset_abs]
dataset_prefixes = ['FULL', 'PRES', 'ABS']  # metric-name prefixes per split
metrics = [val_evaluator_full, val_evaluator_pres, val_evaluator_abs]
val_dataloader = dict(
    # _delete_=True discards the inherited dataset config entirely.
    dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets))
test_dataloader = val_dataloader
val_evaluator = dict(
    _delete_=True,
    type='MultiDatasetsEvaluator',
    metrics=metrics,
    dataset_prefixes=dataset_prefixes)
test_evaluator = val_evaluator
# Same as the Swin-T zero-shot concat-DOD config, but with chunked_size=1 at
# test time — presumably evaluates one text chunk per forward pass to reduce
# peak memory (slower); confirm against GroundingDINO's test_cfg handling.
_base_ = 'grounding_dino_swin-t_pretrain_zeroshot_concat_dod.py'
model = dict(test_cfg=dict(chunked_size=1))
# Zero-shot phrase-grounding evaluation of Grounding DINO (Swin-T) on
# Flickr30k Entities: the val and test splits are concatenated into one
# dataloader and scored by a MultiDatasetsEvaluator with one Flickr30kMetric
# per split.
_base_ = '../grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py'
dataset_type = 'Flickr30kDataset'
data_root = 'data/flickr30k_entities/'
test_pipeline = [
    dict(
        type='LoadImageFromFile', backend_args=None,
        imdecode_backend='pillow'),
    dict(
        # Resize toward scale=(800, 1333) while preserving aspect ratio.
        type='FixScaleResize',
        scale=(800, 1333),
        keep_ratio=True,
        backend='pillow'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        # Phrase-grounding extras: token spans, phrase ids and phrase text.
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'text', 'custom_entities',
                   'tokens_positive', 'phrase_ids', 'phrases'))
]
dataset_Flickr30k_val = dict(
    type=dataset_type,
    data_root=data_root,
    ann_file='final_flickr_separateGT_val.json',
    data_prefix=dict(img='flickr30k_images/'),
    pipeline=test_pipeline,
)
dataset_Flickr30k_test = dict(
    type=dataset_type,
    data_root=data_root,
    ann_file='final_flickr_separateGT_test.json',
    data_prefix=dict(img='flickr30k_images/'),
    pipeline=test_pipeline,
)
val_evaluator_Flickr30k = dict(type='Flickr30kMetric')
test_evaluator_Flickr30k = dict(type='Flickr30kMetric')
# ----------Config---------- #
dataset_prefixes = ['Flickr30kVal', 'Flickr30kTest']  # metric-name prefixes
datasets = [dataset_Flickr30k_val, dataset_Flickr30k_test]
metrics = [val_evaluator_Flickr30k, test_evaluator_Flickr30k]
val_dataloader = dict(
    # _delete_=True discards the inherited dataset config entirely.
    dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets))
test_dataloader = val_dataloader
val_evaluator = dict(
    _delete_=True,
    type='MultiDatasetsEvaluator',
    metrics=metrics,
    dataset_prefixes=dataset_prefixes)
test_evaluator = val_evaluator
# Fine-tune Grounding DINO with a Swin-B backbone on COCO: inherit the Swin-T
# finetune schedule/data and override backbone/neck plus the init checkpoint.
_base_ = [
    './grounding_dino_swin-t_finetune_16xb2_1x_coco.py',
]
# Official Swin-B Grounding DINO weights used as the finetuning start point.
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/grounding_dino/groundingdino_swinb_cogcoor_mmdet-55949c9c.pth'  # noqa
model = dict(
    type='GroundingDINO',
    backbone=dict(
        # Swin-B settings: 384x384 pretraining resolution, wider embedding
        # (128 vs 96 for Swin-T) and a deeper third stage (18 blocks).
        pretrain_img_size=384,
        embed_dims=128,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
        window_size=12,
        drop_path_rate=0.3,
        patch_norm=True),
    # Output channels of the last three Swin-B stages (128 * 2, * 4, * 8).
    neck=dict(in_channels=[256, 512, 1024]),
)
# Grounding DINO with a Swin-B backbone on top of the Swin-T pretrain config
# (O365 + GoldG + Cap4M); only backbone/neck dimensions are overridden.
_base_ = [
    './grounding_dino_swin-t_pretrain_obj365_goldg_cap4m.py',
]
model = dict(
    type='GroundingDINO',
    backbone=dict(
        # Swin-B settings: 384x384 pretraining resolution, wider embedding
        # (128 vs 96 for Swin-T) and a deeper third stage (18 blocks).
        pretrain_img_size=384,
        embed_dims=128,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
        window_size=12,
        drop_path_rate=0.3,
        patch_norm=True),
    # Output channels of the last three Swin-B stages (128 * 2, * 4, * 8).
    neck=dict(in_channels=[256, 512, 1024]),
)
# Fine-tune Grounding DINO (Swin-T) on a single-class custom "cat" dataset.
_base_ = 'grounding_dino_swin-t_finetune_16xb2_1x_coco.py'
data_root = 'data/cat/'
class_name = ('cat', )  # one category only
num_classes = len(class_name)
# Palette color (crimson) is used for visualization of this class.
metainfo = dict(classes=class_name, palette=[(220, 20, 60)])
model = dict(bbox_head=dict(num_classes=num_classes))
train_dataloader = dict(
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/trainval.json',
        data_prefix=dict(img='images/')))
val_dataloader = dict(
    dataset=dict(
        metainfo=metainfo,
        data_root=data_root,
        ann_file='annotations/test.json',
        data_prefix=dict(img='images/')))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + 'annotations/test.json')
test_evaluator = val_evaluator
max_epoch = 20
default_hooks = dict(
    # Keep only the latest checkpoint plus the best one by the default metric.
    checkpoint=dict(interval=1, max_keep_ckpts=1, save_best='auto'),
    logger=dict(type='LoggerHook', interval=5))
train_cfg = dict(max_epochs=max_epoch, val_interval=1)
param_scheduler = [
    # Short iteration-based linear warmup, then a single 10x LR drop at
    # epoch 15 of the 20-epoch schedule.
    dict(type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=30),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epoch,
        by_epoch=True,
        milestones=[15],
        gamma=0.1)
]
optim_wrapper = dict(
    optimizer=dict(lr=0.00005),
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),  # no weight decay
            'backbone': dict(lr_mult=0.1),  # smaller LR for pretrained backbone
            'language_model': dict(lr_mult=0),  # LR 0 -> language model frozen
        }))
# Base batch size the configured LR corresponds to, for automatic LR scaling.
auto_scale_lr = dict(base_batch_size=16)
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment