Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
xdecoder_mmcv
Commits
a8562a56
Commit
a8562a56
authored
Aug 20, 2024
by
luopl
Browse files
Initial commit
parents
Pipeline
#1564
canceled with stages
Changes
1000
Pipelines
1
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
1444 additions
and
0 deletions
+1444
-0
configs/_base_/datasets/ade20k_instance.py
configs/_base_/datasets/ade20k_instance.py
+53
-0
configs/_base_/datasets/ade20k_panoptic.py
configs/_base_/datasets/ade20k_panoptic.py
+38
-0
configs/_base_/datasets/ade20k_semantic.py
configs/_base_/datasets/ade20k_semantic.py
+48
-0
configs/_base_/datasets/cityscapes_detection.py
configs/_base_/datasets/cityscapes_detection.py
+84
-0
configs/_base_/datasets/cityscapes_instance.py
configs/_base_/datasets/cityscapes_instance.py
+113
-0
configs/_base_/datasets/coco_caption.py
configs/_base_/datasets/coco_caption.py
+60
-0
configs/_base_/datasets/coco_detection.py
configs/_base_/datasets/coco_detection.py
+95
-0
configs/_base_/datasets/coco_instance.py
configs/_base_/datasets/coco_instance.py
+95
-0
configs/_base_/datasets/coco_instance_semantic.py
configs/_base_/datasets/coco_instance_semantic.py
+78
-0
configs/_base_/datasets/coco_panoptic.py
configs/_base_/datasets/coco_panoptic.py
+94
-0
configs/_base_/datasets/coco_semantic.py
configs/_base_/datasets/coco_semantic.py
+78
-0
configs/_base_/datasets/deepfashion.py
configs/_base_/datasets/deepfashion.py
+95
-0
configs/_base_/datasets/dsdl.py
configs/_base_/datasets/dsdl.py
+62
-0
configs/_base_/datasets/isaid_instance.py
configs/_base_/datasets/isaid_instance.py
+59
-0
configs/_base_/datasets/lvis_v0.5_instance.py
configs/_base_/datasets/lvis_v0.5_instance.py
+79
-0
configs/_base_/datasets/lvis_v1_instance.py
configs/_base_/datasets/lvis_v1_instance.py
+22
-0
configs/_base_/datasets/mot_challenge.py
configs/_base_/datasets/mot_challenge.py
+90
-0
configs/_base_/datasets/mot_challenge_det.py
configs/_base_/datasets/mot_challenge_det.py
+66
-0
configs/_base_/datasets/mot_challenge_reid.py
configs/_base_/datasets/mot_challenge_reid.py
+61
-0
configs/_base_/datasets/objects365v1_detection.py
configs/_base_/datasets/objects365v1_detection.py
+74
-0
No files found.
Too many changes to show.
To preserve performance only
1000 of 1000+
files are displayed.
Plain diff
Email patch
configs/_base_/datasets/ade20k_instance.py
0 → 100644
View file @
a8562a56
# dataset settings
# Validation/test config for ADE20K instance segmentation (mmdetection 3.x).
dataset_type = 'ADE20KInstanceDataset'
data_root = 'data/ADEChallengeData2016/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/ADEChallengeData2016/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2560, 640), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='ade20k_instance_val.json',
        data_prefix=dict(img='images/validation'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'ade20k_instance_val.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
configs/_base_/datasets/ade20k_panoptic.py
0 → 100644
View file @
a8562a56
# dataset settings
# Validation/test config for ADE20K panoptic segmentation (mmdetection 3.x).
dataset_type = 'ADE20KPanopticDataset'
data_root = 'data/ADEChallengeData2016/'
backend_args = None

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2560, 640), keep_ratio=True),
    dict(type='LoadPanopticAnnotations', backend_args=backend_args),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

# NOTE: num_workers=0 / persistent_workers=False here (panoptic annotation
# loading is kept in the main process), unlike the sibling configs.
val_dataloader = dict(
    batch_size=1,
    num_workers=0,
    persistent_workers=False,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='ade20k_panoptic_val.json',
        data_prefix=dict(
            img='images/validation/', seg='ade20k_panoptic_val/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoPanopticMetric',
    ann_file=data_root + 'ade20k_panoptic_val.json',
    seg_prefix=data_root + 'ade20k_panoptic_val/',
    backend_args=backend_args)
test_evaluator = val_evaluator
configs/_base_/datasets/ade20k_semantic.py
0 → 100644
View file @
a8562a56
# Validation/test config for ADE20K semantic segmentation (mmdetection 3.x).
dataset_type = 'ADE20KSegDataset'
data_root = 'data/ADEChallengeData2016/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/ADEChallengeData2016/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2048, 512), keep_ratio=True),
    # Only semantic maps are loaded; label 0 is treated as ignore
    # (reduce_zero_label=True), following the ADE20K convention.
    dict(
        type='LoadAnnotations',
        with_bbox=False,
        with_mask=False,
        with_seg=True,
        reduce_zero_label=True),
    dict(
        type='PackDetInputs', meta_keys=('img_path', 'ori_shape', 'img_shape'))
]

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(
            img_path='images/validation',
            seg_map_path='annotations/validation'),
        pipeline=test_pipeline))
test_dataloader = val_dataloader

val_evaluator = dict(type='SemSegMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator
configs/_base_/datasets/cityscapes_detection.py
0 → 100644
View file @
a8562a56
# dataset settings
# Train/val/test config for Cityscapes object detection (mmdetection 3.x).
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/segmentation/',
#         'data/': 's3://openmmlab/datasets/segmentation/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomResize',
        scale=[(2048, 800), (2048, 1024)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    # Cityscapes is small; repeat it 8x per epoch.
    dataset=dict(
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/instancesonly_filtered_gtFine_train.json',
            data_prefix=dict(img='leftImg8bit/train/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args=backend_args)))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instancesonly_filtered_gtFine_val.json',
        data_prefix=dict(img='leftImg8bit/val/'),
        test_mode=True,
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
    metric='bbox',
    backend_args=backend_args)
test_evaluator = val_evaluator
configs/_base_/datasets/cityscapes_instance.py
0 → 100644
View file @
a8562a56
# dataset settings
# Train/val/test config for Cityscapes instance segmentation (mmdetection 3.x).
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'
# Method 2: Use backend_args, file_client_args in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/segmentation/',
#         'data/': 's3://openmmlab/datasets/segmentation/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomResize',
        scale=[(2048, 800), (2048, 1024)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    # Cityscapes is small; repeat it 8x per epoch.
    dataset=dict(
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/instancesonly_filtered_gtFine_train.json',
            data_prefix=dict(img='leftImg8bit/train/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args=backend_args)))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instancesonly_filtered_gtFine_val.json',
        data_prefix=dict(img='leftImg8bit/val/'),
        test_mode=True,
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

# Two evaluators: COCO-style bbox/segm AP plus the official Cityscapes metric.
val_evaluator = [
    dict(
        type='CocoMetric',
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_val.json',
        metric=['bbox', 'segm'],
        backend_args=backend_args),
    dict(
        type='CityScapesMetric',
        seg_prefix=data_root + 'gtFine/val',
        outfile_prefix='./work_dirs/cityscapes_metric/instance',
        backend_args=backend_args)
]
test_evaluator = val_evaluator

# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
#     batch_size=1,
#     num_workers=2,
#     persistent_workers=True,
#     drop_last=False,
#     sampler=dict(type='DefaultSampler', shuffle=False),
#     dataset=dict(
#         type=dataset_type,
#         data_root=data_root,
#         ann_file='annotations/instancesonly_filtered_gtFine_test.json',
#         data_prefix=dict(img='leftImg8bit/test/'),
#         test_mode=True,
#         filter_cfg=dict(filter_empty_gt=True, min_size=32),
#         pipeline=test_pipeline))
# test_evaluator = dict(
#     type='CityScapesMetric',
#     format_only=True,
#     outfile_prefix='./work_dirs/cityscapes_metric/test')
configs/_base_/datasets/coco_caption.py
0 → 100644
View file @
a8562a56
# data settings
# Validation/test config for COCO image captioning (Karpathy split).
dataset_type = 'CocoCaptionDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

test_pipeline = [
    dict(
        type='LoadImageFromFile',
        imdecode_backend='pillow',
        backend_args=backend_args),
    dict(
        type='Resize',
        scale=(224, 224),
        interpolation='bicubic',
        backend='pillow'),
    dict(type='PackInputs', meta_keys=['image_id']),
]

# ann_file download from
# train dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_train.json # noqa
# val dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val.json # noqa
# test dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test.json # noqa
# val evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json # noqa
# test evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json # noqa
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/coco_karpathy_val.json',
        pipeline=test_pipeline,
    ))

val_evaluator = dict(
    type='COCOCaptionMetric',
    ann_file=data_root + 'annotations/coco_karpathy_val_gt.json',
)

# # If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/coco_detection.py
0 → 100644
View file @
a8562a56
# dataset settings
# Train/val/test config for COCO object detection (mmdetection 3.x).
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric='bbox',
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator

# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
#     batch_size=1,
#     num_workers=2,
#     persistent_workers=True,
#     drop_last=False,
#     sampler=dict(type='DefaultSampler', shuffle=False),
#     dataset=dict(
#         type=dataset_type,
#         data_root=data_root,
#         ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#         data_prefix=dict(img='test2017/'),
#         test_mode=True,
#         pipeline=test_pipeline))
# test_evaluator = dict(
#     type='CocoMetric',
#     metric='bbox',
#     format_only=True,
#     ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#     outfile_prefix='./work_dirs/coco_detection/test')
configs/_base_/datasets/coco_instance.py
0 → 100644
View file @
a8562a56
# dataset settings
# Train/val/test config for COCO instance segmentation (mmdetection 3.x).
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator

# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
#     batch_size=1,
#     num_workers=2,
#     persistent_workers=True,
#     drop_last=False,
#     sampler=dict(type='DefaultSampler', shuffle=False),
#     dataset=dict(
#         type=dataset_type,
#         data_root=data_root,
#         ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#         data_prefix=dict(img='test2017/'),
#         test_mode=True,
#         pipeline=test_pipeline))
# test_evaluator = dict(
#     type='CocoMetric',
#     metric=['bbox', 'segm'],
#     format_only=True,
#     ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#     outfile_prefix='./work_dirs/coco_instance/test')
configs/_base_/datasets/coco_instance_semantic.py
0 → 100644
View file @
a8562a56
# dataset settings
# Train/val/test config for COCO instance + stuff-semantic training
# (e.g. HTC-style models) in mmdetection 3.x.
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        # `seg` points at the stuffthingmaps used for the semantic branch.
        data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
configs/_base_/datasets/coco_panoptic.py
0 → 100644
View file @
a8562a56
# dataset settings
# Train/val/test config for COCO panoptic segmentation (mmdetection 3.x).
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadPanopticAnnotations', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='LoadPanopticAnnotations', backend_args=backend_args),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/panoptic_train2017.json',
        data_prefix=dict(
            img='train2017/', seg='annotations/panoptic_train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/panoptic_val2017.json',
        data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoPanopticMetric',
    ann_file=data_root + 'annotations/panoptic_val2017.json',
    seg_prefix=data_root + 'annotations/panoptic_val2017/',
    backend_args=backend_args)
test_evaluator = val_evaluator

# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
#     batch_size=1,
#     num_workers=1,
#     persistent_workers=True,
#     drop_last=False,
#     sampler=dict(type='DefaultSampler', shuffle=False),
#     dataset=dict(
#         type=dataset_type,
#         data_root=data_root,
#         ann_file='annotations/panoptic_image_info_test-dev2017.json',
#         data_prefix=dict(img='test2017/'),
#         test_mode=True,
#         pipeline=test_pipeline))
# test_evaluator = dict(
#     type='CocoPanopticMetric',
#     format_only=True,
#     ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
#     outfile_prefix='./work_dirs/coco_panoptic/test')
configs/_base_/datasets/coco_semantic.py
0 → 100644
View file @
a8562a56
# dataset settings
# Train/val/test config for COCO stuff+thing semantic segmentation
# (mmdetection 3.x).
dataset_type = 'CocoSegDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    # Only semantic maps are needed; skip boxes and labels.
    dict(
        type='LoadAnnotations',
        with_bbox=False,
        with_label=False,
        with_seg=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(
        type='LoadAnnotations',
        with_bbox=False,
        with_label=False,
        with_seg=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'))
]

# For stuffthingmaps_semseg, please refer to
# `docs/en/user_guides/dataset_prepare.md`
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(
            img_path='train2017/',
            seg_map_path='stuffthingmaps_semseg/train2017/'),
        pipeline=train_pipeline))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(
            img_path='val2017/',
            seg_map_path='stuffthingmaps_semseg/val2017/'),
        pipeline=test_pipeline))
test_dataloader = val_dataloader

val_evaluator = dict(type='SemSegMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator
configs/_base_/datasets/deepfashion.py
0 → 100644
View file @
a8562a56
# dataset settings
# Train/val/test config for DeepFashion In-shop instance segmentation.
# Note: val uses the "query" split, test uses the "gallery" split, so the
# two dataloaders/evaluators are configured separately (no aliasing).
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', scale=(750, 1101), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(750, 1101), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    # Small dataset; repeat 2x per epoch.
    dataset=dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='Anno/segmentation/DeepFashion_segmentation_train.json',
            data_prefix=dict(img='Img/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args=backend_args)))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='Anno/segmentation/DeepFashion_segmentation_query.json',
        data_prefix=dict(img='Img/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))

test_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='Anno/segmentation/DeepFashion_segmentation_gallery.json',
        data_prefix=dict(img='Img/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'Anno/segmentation/DeepFashion_segmentation_query.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root +
    'Anno/segmentation/DeepFashion_segmentation_gallery.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
configs/_base_/datasets/dsdl.py
0 → 100644
View file @
a8562a56
# DSDL-format detection dataset settings.
dataset_type = 'DSDLDetDataset'
data_root = 'path to dataset folder'
train_ann = 'path to train yaml file'
val_ann = 'path to val yaml file'

backend_args = None
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': "s3://open_data/",
#         'data/': "s3://open_data/"
#     }))

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'instances'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=train_ann,
        filter_cfg=dict(filter_empty_gt=True, min_size=32, bbox_min_size=32),
        pipeline=train_pipeline))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=val_ann,
        test_mode=True,
        pipeline=test_pipeline))
test_dataloader = val_dataloader

val_evaluator = dict(type='CocoMetric', metric='bbox')
# val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
configs/_base_/datasets/isaid_instance.py
0 → 100644
View file @
a8562a56
# dataset settings
dataset_type = 'iSAIDDataset'
data_root = 'data/iSAID/'
backend_args = None

# Please see `projects/iSAID/README.md` for data preparation
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', scale=(800, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(800, 800), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='train/instancesonly_filtered_train.json',
        data_prefix=dict(img='train/images/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='val/instancesonly_filtered_val.json',
        data_prefix=dict(img='val/images/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'val/instancesonly_filtered_val.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
configs/_base_/datasets/lvis_v0.5_instance.py
0 → 100644
View file @
a8562a56
# dataset settings
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/lvis_v0.5/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        # Oversample images containing rare categories.
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/lvis_v0.5_train.json',
            data_prefix=dict(img='train2017/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args=backend_args)))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/lvis_v0.5_val.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='LVISMetric',
    ann_file=data_root + 'annotations/lvis_v0.5_val.json',
    metric=['bbox', 'segm'],
    backend_args=backend_args)
test_evaluator = val_evaluator
configs/_base_/datasets/lvis_v1_instance.py
0 → 100644
View file @
a8562a56
# dataset settings
# Inherits pipelines, samplers and evaluator type from the v0.5 config and
# only overrides the dataset class, paths and annotation files.
_base_ = 'lvis_v0.5_instance.py'
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'

train_dataloader = dict(
    dataset=dict(
        # inner dataset of the inherited ClassBalancedDataset wrapper
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/lvis_v1_train.json',
            data_prefix=dict(img=''))))
val_dataloader = dict(
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/lvis_v1_val.json',
        data_prefix=dict(img='')))
test_dataloader = val_dataloader

val_evaluator = dict(ann_file=data_root + 'annotations/lvis_v1_val.json')
test_evaluator = val_evaluator
configs/_base_/datasets/mot_challenge.py
0 → 100644
View file @
a8562a56
# dataset settings
dataset_type = 'MOTChallengeDataset'
data_root = 'data/MOT17/'
img_scale = (1088, 1088)
backend_args = None

# data pipeline
train_pipeline = [
    dict(
        type='UniformRefFrameSample',
        num_ref_imgs=1,
        frame_range=10,
        filter_key_img=True),
    dict(
        type='TransformBroadcaster',
        share_random_params=True,
        transforms=[
            dict(type='LoadImageFromFile', backend_args=backend_args),
            dict(type='LoadTrackAnnotations'),
            dict(
                type='RandomResize',
                scale=img_scale,
                ratio_range=(0.8, 1.2),
                keep_ratio=True,
                clip_object_border=False),
            dict(type='PhotoMetricDistortion')
        ]),
    dict(
        type='TransformBroadcaster',
        # different cropped positions for different frames
        share_random_params=False,
        transforms=[
            dict(
                type='RandomCrop',
                crop_size=img_scale,
                bbox_clip_border=False)
        ]),
    dict(
        type='TransformBroadcaster',
        share_random_params=True,
        transforms=[
            dict(type='RandomFlip', prob=0.5),
        ]),
    dict(type='PackTrackInputs')
]
test_pipeline = [
    dict(
        type='TransformBroadcaster',
        transforms=[
            dict(type='LoadImageFromFile', backend_args=backend_args),
            dict(type='Resize', scale=img_scale, keep_ratio=True),
            dict(type='LoadTrackAnnotations')
        ]),
    dict(type='PackTrackInputs')
]

# dataloader
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='TrackImgSampler'),  # image-based sampling
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        visibility_thr=-1,
        ann_file='annotations/half-train_cocoformat.json',
        data_prefix=dict(img_path='train'),
        metainfo=dict(classes=('pedestrian', )),
        pipeline=train_pipeline))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    # Now we support two ways to test, image_based and video_based
    # if you want to use video_based sampling, you can use as follows
    # sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    sampler=dict(type='TrackImgSampler'),  # image-based sampling
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/half-val_cocoformat.json',
        data_prefix=dict(img_path='train'),
        test_mode=True,
        pipeline=test_pipeline))
test_dataloader = val_dataloader

# evaluator
val_evaluator = dict(
    type='MOTChallengeMetric', metric=['HOTA', 'CLEAR', 'Identity'])
test_evaluator = val_evaluator
configs/_base_/datasets/mot_challenge_det.py
0 → 100644
View file @
a8562a56
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/MOT17/'
backend_args = None

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        backend_args=backend_args,
        to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomResize',
        scale=(1088, 1088),
        ratio_range=(0.8, 1.2),
        keep_ratio=True,
        clip_object_border=False),
    dict(type='PhotoMetricDistortion'),
    dict(type='RandomCrop', crop_size=(1088, 1088), bbox_clip_border=False),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1088, 1088), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/half-train_cocoformat.json',
        data_prefix=dict(img='train/'),
        metainfo=dict(classes=('pedestrian', )),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/half-val_cocoformat.json',
        data_prefix=dict(img='train/'),
        metainfo=dict(classes=('pedestrian', )),
        test_mode=True,
        pipeline=test_pipeline))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/half-val_cocoformat.json',
    metric='bbox',
    format_only=False)
test_evaluator = val_evaluator
configs/_base_/datasets/mot_challenge_reid.py
0 → 100644
View file @
a8562a56
# dataset settings
dataset_type = 'ReIDDataset'
data_root = 'data/MOT17/'
backend_args = None

# data pipeline
train_pipeline = [
    dict(
        type='TransformBroadcaster',
        share_random_params=False,
        transforms=[
            dict(
                type='LoadImageFromFile',
                backend_args=backend_args,
                to_float32=True),
            dict(
                type='Resize',
                scale=(128, 256),
                keep_ratio=False,
                clip_object_border=False),
            dict(type='RandomFlip', prob=0.5, direction='horizontal'),
        ]),
    dict(type='PackReIDInputs', meta_keys=('flip', 'flip_direction'))
]
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        backend_args=backend_args,
        to_float32=True),
    dict(type='Resize', scale=(128, 256), keep_ratio=False),
    dict(type='PackReIDInputs')
]

# dataloader
train_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        # sample 8 identities x 4 instances each for the triplet loss
        triplet_sampler=dict(num_ids=8, ins_per_id=4),
        data_prefix=dict(img_path='reid/imgs'),
        ann_file='reid/meta/train_80.txt',
        pipeline=train_pipeline))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        triplet_sampler=None,
        data_prefix=dict(img_path='reid/imgs'),
        ann_file='reid/meta/val_20.txt',
        pipeline=test_pipeline))
test_dataloader = val_dataloader

# evaluator
val_evaluator = dict(type='ReIDMetrics', metric=['mAP', 'CMC'])
test_evaluator = val_evaluator
configs/_base_/datasets/objects365v1_detection.py
0 → 100644
View file @
a8562a56
# dataset settings
dataset_type = 'Objects365V1Dataset'
data_root = 'data/Objects365/Obj365_v1/'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/objects365_train.json',
        data_prefix=dict(img='train/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/objects365_val.json',
        data_prefix=dict(img='val/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/objects365_val.json',
    metric='bbox',
    sort_categories=True,
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
Prev
1
2
3
4
5
6
…
50
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment