Unverified commit c04831c5, authored by Xiang Xu, committed by GitHub
Browse files

[Fix] Update new config type (#2655)

* update new config

* fix bug
parent 5638af7d
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
from mmengine.config import read_base
with read_base():
from .._base_.datasets.nus_3d import *
from .._base_.models.centerpoint_pillar02_second_secfpn_nus import *
from .._base_.schedules.cyclic_20e import *
......@@ -29,9 +31,9 @@ class_names = [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
data_prefix.merge(
data_prefix.update(
dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP'))
model.merge(
model.update(
dict(
data_preprocessor=dict(
voxel_layer=dict(point_cloud_range=point_cloud_range)),
......@@ -167,13 +169,13 @@ train_dataloader.merge(
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR',
backend_args=backend_args))))
test_dataloader.merge(
test_dataloader.update(
dict(
dataset=dict(
pipeline=test_pipeline, metainfo=dict(classes=class_names))))
val_dataloader.merge(
val_dataloader.update(
dict(
dataset=dict(
pipeline=test_pipeline, metainfo=dict(classes=class_names))))
train_cfg.merge(dict(val_interval=20))
train_cfg.update(dict(val_interval=20))
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
from mmengine import read_base
with read_base():
from .._base_.datasets.nus_3d import *
from .._base_.models.centerpoint_voxel01_second_secfpn_nus import *
from .._base_.schedules.cyclic_20e import *
......@@ -29,9 +31,9 @@ class_names = [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
data_prefix.merge(
data_prefix.update(
dict(pts='samples/LIDAR_TOP', img='', sweeps='sweeps/LIDAR_TOP'))
model.merge(
model.update(
dict(
data_preprocessor=dict(
voxel_layer=dict(point_cloud_range=point_cloud_range)),
......@@ -167,13 +169,13 @@ train_dataloader.merge(
# and box_type_3d='Depth' in sunrgbd and scannet dataset.
box_type_3d='LiDAR',
backend_args=backend_args))))
test_dataloader.merge(
test_dataloader.update(
dict(
dataset=dict(
pipeline=test_pipeline, metainfo=dict(classes=class_names))))
val_dataloader.merge(
val_dataloader.update(
dict(
dataset=dict(
pipeline=test_pipeline, metainfo=dict(classes=class_names))))
train_cfg.merge(dict(val_interval=20))
train_cfg.update(dict(val_interval=20))
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
from mmengine import read_base
with read_base():
from .._base_.datasets.semantickitti import *
from .._base_.models.minkunet import *
from .._base_.schedules.schedule_3x import *
......@@ -15,7 +17,7 @@ from mmdet3d.datasets.transforms.loading import (LoadAnnotations3D,
from mmdet3d.datasets.transforms.transforms_3d import (GlobalRotScaleTrans,
LaserMix, PolarMix)
model.merge(
model.update(
dict(
data_preprocessor=dict(max_voxels=None),
backbone=dict(encoder_blocks=[2, 3, 4, 6])))
......@@ -92,6 +94,6 @@ train_pipeline = [
dict(type=Pack3DDetInputs, keys=['points', 'pts_semantic_mask'])
]
train_dataloader.merge(dict(dataset=dict(pipeline=train_pipeline)))
train_dataloader.update(dict(dataset=dict(pipeline=train_pipeline)))
default_hooks.merge(dict(checkpoint=dict(type=CheckpointHook, interval=1)))
default_hooks.update(dict(checkpoint=dict(type=CheckpointHook, interval=1)))
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
from mmengine import read_base
with read_base():
from .._base_.schedules.cosine import *
from .._base_.default_runtime import *
......@@ -293,7 +295,7 @@ test_dataloader = dict(
box_type_3d='LiDAR',
backend_args=backend_args))
optim_wrapper.merge(
optim_wrapper.update(
dict(
optimizer=dict(weight_decay=0.01),
clip_grad=dict(max_norm=35, norm_type=2),
......
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
from mmengine import read_base
with read_base():
from .._base_.datasets.kitti_mono3d import *
from .._base_.models.pgd import *
from .._base_.schedules.mmdet_schedule_1x import *
......@@ -19,7 +21,7 @@ from mmdet3d.models.losses.uncertain_smooth_l1_loss import \
from mmdet3d.models.task_modules.coders.pgd_bbox_coder import PGDBBoxCoder
# model settings
model.merge(
model.update(
dict(
data_preprocessor=dict(
type=Det3DDataPreprocessor,
......@@ -121,13 +123,13 @@ test_pipeline = [
dict(type=Pack3DDetInputs, keys=['img'])
]
train_dataloader.merge(
train_dataloader.update(
dict(batch_size=3, num_workers=3, dataset=dict(pipeline=train_pipeline)))
test_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline)))
val_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline)))
test_dataloader.update(dict(dataset=dict(pipeline=test_pipeline)))
val_dataloader.update(dict(dataset=dict(pipeline=test_pipeline)))
# optimizer
optim_wrapper.merge(
optim_wrapper.update(
dict(
optimizer=dict(lr=0.001),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
......@@ -146,5 +148,5 @@ param_scheduler = [
gamma=0.1)
]
train_cfg.merge(dict(max_epochs=48, val_interval=2))
auto_scale_lr.merge(dict(base_batch_size=12))
train_cfg.update(dict(max_epochs=48, val_interval=2))
auto_scale_lr.update(dict(base_batch_size=12))
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
from mmengine import read_base
with read_base():
from .._base_.datasets.scannet_3d import *
from .._base_.models.votenet import *
from .._base_.schedules.schedule_3x import *
......@@ -11,7 +13,7 @@ from mmdet3d.models.task_modules.coders.partial_bin_based_bbox_coder import \
PartialBinBasedBBoxCoder
# model settings
model.merge(
model.update(
dict(
bbox_head=dict(
num_classes=18,
......@@ -39,9 +41,9 @@ model.merge(
[1.1511526, 1.0546296, 0.49706793],
[0.47535285, 0.49249494, 0.5802117]]))))
default_hooks.merge(dict(logger=dict(type=LoggerHook, interval=30)))
default_hooks.update(dict(logger=dict(type=LoggerHook, interval=30)))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (8 samples per GPU).
auto_scale_lr.merge(dict(enable=False, base_batch_size=64))
auto_scale_lr.update(dict(enable=False, base_batch_size=64))
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment