    dict(type='IndoorPointSample',  # Sample indoor points, refer to mmdet3d.datasets.pipelines.indoor_sample for more details
        num_points=40000),  # Number of points to be sampled
    dict(type='IndoorFlipData',  # Augmentation pipeline that flips points and 3d boxes
        flip_ratio_yz=0.5,  # Probability of being flipped along the yz plane
        flip_ratio_xz=0.5),  # Probability of being flipped along the xz plane
    dict(
        type='IndoorGlobalRotScale',  # Augmentation pipeline that rotates and scales points and 3d boxes, refer to mmdet3d.datasets.pipelines.indoor_augment for more details
        shift_height=True,  # Whether to use shifted height
        rot_range=[-0.027777777777777776, 0.027777777777777776],  # Range of rotation
        scale_range=None),  # Range of scaling
    dict(
        type='DefaultFormatBundle3D'),  # Default format bundle to gather data in the pipeline, refer to mmdet3d.datasets.pipelines.formating for more details
    dict(type='Collect3D',  # Pipeline that decides which keys in the data should be passed to the detector, refer to mmdet3d.datasets.pipelines.formating for more details
        keys=['points'])
]
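# Each dict in the pipeline above is built into a transform through the registry and applied to a
# sample in order. A minimal sketch of how such a pipeline is consumed (illustrative only; Compose
# comes from the mmdet pipelines and the input fields and file name are hypothetical):
#   from mmdet.datasets.pipelines import Compose
#   pipeline = Compose(train_pipeline)
#   results = pipeline(dict(pts_filename='./data/scannet/points/scene0000_00.bin', ...))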
data = dict(
    samples_per_gpu=8,  # Batch size of a single GPU
    workers_per_gpu=4,  # Worker to pre-fetch data for each single GPU
    train=dict(  # Train dataset config
        type='RepeatDataset',  # Wrapper of dataset, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/dataset_wrappers.py for details.
        times=5,  # Repeat times
        dataset=dict(
            type='ScanNetDataset',  # Type of dataset
            data_root='./data/scannet/',  # Root path of the data
            ann_file='./data/scannet/scannet_infos_train.pkl',  # Path of the annotation file
            pipeline=[  # Pipeline, this is passed by the train_pipeline created before.
optimizer = dict(  # Config used to build the optimizer, supporting all optimizers in PyTorch whose arguments are also the same as those in PyTorch
    type='Adam',  # Type of optimizer, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/optimizer/default_constructor.py#L13 for more details
    lr=0.008)  # Learning rate of the optimizer, see detailed usage of the parameters in the documentation of PyTorch
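# The optimizer dict above is turned into a torch.optim optimizer by the default optimizer
# constructor, so any extra keyword accepted by torch.optim.Adam could be added alongside lr,
# for example (weight_decay shown here only as an illustration):
#   optimizer = dict(type='Adam', lr=0.008, weight_decay=0.01)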
optimizer_config = dict(  # Config used to build the optimizer hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8 for implementation details.
    grad_clip=dict(  # Config of gradient clipping
        max_norm=10,  # Max norm of the gradients
        norm_type=2))  # Type of the used p-norm. Can be 'inf' for infinity norm.
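# With grad_clip set, mmcv's OptimizerHook rescales gradients after each backward pass via
# torch.nn.utils.clip_grad_norm_, so their global L2 norm (norm_type=2) does not exceed
# max_norm=10 before the optimizer step.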
lr_config = dict(  # Learning rate scheduler config used to register the LrUpdater hook
    policy='step',  # The policy of the scheduler, also supports CosineAnnealing, Cyclic, etc. Refer to the details of supported LrUpdaters from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9.
    warmup=None,  # The warmup policy, also supports `exp` and `constant`.
    step=[24, 32])  # Steps to decay the learning rate
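# Assuming the default decay factor of mmcv's StepLrUpdaterHook (gamma=0.1), this schedule keeps
# the learning rate at 0.008 for epochs 1-24, 0.0008 for epochs 25-32 and 0.00008 for epochs 33-36.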
checkpoint_config = dict(  # Config to set the checkpoint hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation.
    interval=1)  # Save a checkpoint every epoch
log_config = dict(  # Config to register the logger hook
    interval=50,  # Interval to print the log
    hooks=[dict(type='TextLoggerHook'),
           dict(type='TensorboardLoggerHook')])  # The loggers used to record the training process.
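# TextLoggerHook prints losses and metrics to the terminal and to the log files in the work
# directory, while TensorboardLoggerHook writes the same scalars as TensorBoard events (by default
# under the work directory), so training can be monitored with `tensorboard --logdir ${WORK_DIR}`.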
total_epochs = 36  # Total epochs to train the model
dist_params = dict(backend='nccl')  # Parameters to set up distributed training, the port can also be set.
log_level = 'INFO'  # The level of logging.
find_unused_parameters = True  # Whether to find unused parameters
work_dir = None  # Directory to save the model checkpoints and logs for the current experiments.
load_from = None  # Load models as a pre-trained model from a given path. This will not resume training.
resume_from = None  # Resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint was saved.
workflow = [('train', 1)]  # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. The workflow trains the model for 36 epochs according to the total_epochs.
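# A config assembled like this is consumed by the standard entry points, e.g. (the config path is
# illustrative):
#   python tools/train.py configs/votenet/votenet_8x8_scannet-3d-18class.py --work-dir work_dirs/votenet
#   ./tools/dist_train.sh configs/votenet/votenet_8x8_scannet-3d-18class.py 8  # distributed training on 8 GPUs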