Unverified commit 32567b04, authored by Shaoshuai Shi, committed by GitHub

Merge pull request #192 from sshaoshuai/master

Release OpenPCDet v0.3.0
parents 853b759b 04e0d4f0
...
@@ -34,7 +34,8 @@ MODEL:
         DIR_LIMIT_OFFSET: 0.0
         NUM_DIR_BINS: 2

-        USE_MULTI_HEAD: True
+        USE_MULTIHEAD: True
+        SEPARATE_MULTIHEAD: True
         ANCHOR_GENERATOR_CONFIG: [
             {
                 'class_name': 'Car',
@@ -52,7 +53,7 @@ MODEL:
                 'anchor_rotations': [0, 1.57],
                 'anchor_bottom_heights': [-1.6],
                 'align_center': False,
-                'feature_map_stride': 4,
+                'feature_map_stride': 8,
                 'matched_threshold': 0.5,
                 'unmatched_threshold': 0.35
             },
@@ -62,36 +63,23 @@ MODEL:
                 'anchor_rotations': [0, 1.57],
                 'anchor_bottom_heights': [-1.6],
                 'align_center': False,
-                'feature_map_stride': 4,
+                'feature_map_stride': 8,
                 'matched_threshold': 0.5,
                 'unmatched_threshold': 0.35
             }
         ]
+        SHARED_CONV_NUM_FILTER: 64
         RPN_HEAD_CFGS: [
             {
                 'HEAD_CLS_NAME': ['Car'],
-                'LAYER_NUMS': [1],
-                'LAYER_STRIDES': [1],
-                'NUM_FILTERS': [512],
-                'UPSAMPLE_STRIDES': [1],
-                'NUM_UPSAMPLE_FILTERS': [512]
             },
             {
                 'HEAD_CLS_NAME': ['Pedestrian'],
-                'LAYER_NUMS': [1],
-                'LAYER_STRIDES': [1],
-                'NUM_FILTERS': [512],
-                'UPSAMPLE_STRIDES': [2],
-                'NUM_UPSAMPLE_FILTERS': [512]
             },
             {
                 'HEAD_CLS_NAME': ['Cyclist'],
-                'LAYER_NUMS': [1],
-                'LAYER_STRIDES': [1],
-                'NUM_FILTERS': [512],
-                'UPSAMPLE_STRIDES': [2],
-                'NUM_UPSAMPLE_FILTERS': [512]
             }
         ]
@@ -128,6 +116,9 @@ MODEL:
 OPTIMIZATION:
+    BATCH_SIZE_PER_GPU: 4
+    NUM_EPOCHS: 80
     OPTIMIZER: adam_onecycle
     LR: 0.003
     WEIGHT_DECAY: 0.01
...
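Note: the new SEPARATE_MULTIHEAD / SHARED_CONV_NUM_FILTER / RPN_HEAD_CFGS options group the classes into several small heads that sit on one shared convolution, instead of giving every head its own full conv stack. The snippet below is only a rough sketch of that idea; the channel sizes, class grouping, and module layout are illustrative, not the actual AnchorHeadMulti code.

import torch.nn as nn

class MultiHeadSketch(nn.Module):
    """Sketch: one shared conv, then one small classification branch per class group."""

    def __init__(self, in_channels=384, shared_channels=64,
                 head_cls_names=(['Car'], ['Pedestrian'], ['Cyclist'])):
        super().__init__()
        self.shared_conv = nn.Sequential(
            nn.Conv2d(in_channels, shared_channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(shared_channels),
            nn.ReLU(),
        )
        # Each head only scores its own classes; 2 anchors per location (two rotations).
        self.cls_heads = nn.ModuleList([
            nn.Conv2d(shared_channels, len(names) * 2, 1) for names in head_cls_names
        ])

    def forward(self, bev_features):
        x = self.shared_conv(bev_features)
        return [head(x) for head in self.cls_heads]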
CLASS_NAMES: ['car','truck', 'construction_vehicle', 'bus', 'trailer',
              'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone']

DATA_CONFIG:
    _BASE_CONFIG_: cfgs/dataset_configs/nuscenes_dataset.yaml

    POINT_CLOUD_RANGE: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
    DATA_PROCESSOR:
        - NAME: mask_points_and_boxes_outside_range
          REMOVE_OUTSIDE_BOXES: True

        - NAME: shuffle_points
          SHUFFLE_ENABLED: {
              'train': True,
              'test': True
          }

        - NAME: transform_points_to_voxels
          VOXEL_SIZE: [0.2, 0.2, 8.0]
          MAX_POINTS_PER_VOXEL: 20
          MAX_NUMBER_OF_VOXELS: {
              'train': 30000,
              'test': 30000
          }

MODEL:
    NAME: PointPillar

    VFE:
        NAME: PillarVFE
        WITH_DISTANCE: False
        USE_ABSLOTE_XYZ: True
        USE_NORM: True
        NUM_FILTERS: [64]

    MAP_TO_BEV:
        NAME: PointPillarScatter
        NUM_BEV_FEATURES: 64

    BACKBONE_2D:
        NAME: BaseBEVBackbone
        LAYER_NUMS: [3, 5, 5]
        LAYER_STRIDES: [2, 2, 2]
        NUM_FILTERS: [64, 128, 256]
        UPSAMPLE_STRIDES: [0.5, 1, 2]
        NUM_UPSAMPLE_FILTERS: [128, 128, 128]

    DENSE_HEAD:
        NAME: AnchorHeadMulti
        CLASS_AGNOSTIC: False

        DIR_OFFSET: 0.78539
        DIR_LIMIT_OFFSET: 0.0
        NUM_DIR_BINS: 2

        USE_MULTIHEAD: True
        SEPARATE_MULTIHEAD: True
        ANCHOR_GENERATOR_CONFIG: [
            {
                'class_name': car,
                'anchor_sizes': [[4.63, 1.97, 1.74]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.95],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.6,
                'unmatched_threshold': 0.45
            },
            {
                'class_name': truck,
                'anchor_sizes': [[6.93, 2.51, 2.84]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.6],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.55,
                'unmatched_threshold': 0.4
            },
            {
                'class_name': construction_vehicle,
                'anchor_sizes': [[6.37, 2.85, 3.19]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.225],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.5,
                'unmatched_threshold': 0.35
            },
            {
                'class_name': bus,
                'anchor_sizes': [[10.5, 2.94, 3.47]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.085],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.55,
                'unmatched_threshold': 0.4
            },
            {
                'class_name': trailer,
                'anchor_sizes': [[12.29, 2.90, 3.87]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [0.115],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.5,
                'unmatched_threshold': 0.35
            },
            {
                'class_name': barrier,
                'anchor_sizes': [[0.50, 2.53, 0.98]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-1.33],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.55,
                'unmatched_threshold': 0.4
            },
            {
                'class_name': motorcycle,
                'anchor_sizes': [[2.11, 0.77, 1.47]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-1.085],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.5,
                'unmatched_threshold': 0.3
            },
            {
                'class_name': bicycle,
                'anchor_sizes': [[1.70, 0.60, 1.28]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-1.18],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.5,
                'unmatched_threshold': 0.35
            },
            {
                'class_name': pedestrian,
                'anchor_sizes': [[0.73, 0.67, 1.77]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.935],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.6,
                'unmatched_threshold': 0.4
            },
            {
                'class_name': traffic_cone,
                'anchor_sizes': [[0.41, 0.41, 1.07]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-1.285],
                'align_center': False,
                'feature_map_stride': 4,
                'matched_threshold': 0.6,
                'unmatched_threshold': 0.4
            },
        ]

        SHARED_CONV_NUM_FILTER: 64

        RPN_HEAD_CFGS: [
            {
                'HEAD_CLS_NAME': ['car'],
            },
            {
                'HEAD_CLS_NAME': ['truck', 'construction_vehicle'],
            },
            {
                'HEAD_CLS_NAME': ['bus', 'trailer'],
            },
            {
                'HEAD_CLS_NAME': ['barrier'],
            },
            {
                'HEAD_CLS_NAME': ['motorcycle', 'bicycle'],
            },
            {
                'HEAD_CLS_NAME': ['pedestrian', 'traffic_cone'],
            },
        ]

        SEPARATE_REG_CONFIG:
            NUM_MIDDLE_CONV: 1
            NUM_MIDDLE_FILTER: 64
            REG_LIST: ['reg:2', 'height:1', 'size:3', 'angle:2', 'velo:2']

        TARGET_ASSIGNER_CONFIG:
            NAME: AxisAlignedTargetAssigner
            POS_FRACTION: -1.0
            SAMPLE_SIZE: 512
            NORM_BY_NUM_EXAMPLES: False
            MATCH_HEIGHT: False
            BOX_CODER: ResidualCoder
            BOX_CODER_CONFIG: {
                'code_size': 9,
                'encode_angle_by_sincos': True
            }

        LOSS_CONFIG:
            REG_LOSS_TYPE: WeightedL1Loss
            LOSS_WEIGHTS: {
                'pos_cls_weight': 1.0,
                'neg_cls_weight': 2.0,
                'cls_weight': 1.0,
                'loc_weight': 0.25,
                'dir_weight': 0.2,
                'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2]
            }

    POST_PROCESSING:
        RECALL_THRESH_LIST: [0.3, 0.5, 0.7]
        SCORE_THRESH: 0.1
        OUTPUT_RAW_SCORE: False

        EVAL_METRIC: kitti

        NMS_CONFIG:
            MULTI_CLASSES_NMS: True
            NMS_TYPE: nms_gpu
            NMS_THRESH: 0.2
            NMS_PRE_MAXSIZE: 1000
            NMS_POST_MAXSIZE: 83

OPTIMIZATION:
    BATCH_SIZE_PER_GPU: 4
    NUM_EPOCHS: 20

    OPTIMIZER: adam_onecycle
    LR: 0.001
    WEIGHT_DECAY: 0.01
    MOMENTUM: 0.9

    MOMS: [0.95, 0.85]
    PCT_START: 0.4
    DIV_FACTOR: 10
    DECAY_STEP_LIST: [35, 45]
    LR_DECAY: 0.1
    LR_CLIP: 0.0000001

    LR_WARMUP: False
    WARMUP_EPOCH: 1

    GRAD_NORM_CLIP: 10
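The config above can be sanity-checked with the same helpers the tools scripts already import; the file path below is an assumption about where this YAML lives in the repo, so adjust it as needed.

from pcdet.config import cfg, cfg_from_yaml_file

# Hypothetical location of the PointPillar multi-head config shown above.
cfg_from_yaml_file('cfgs/nuscenes_models/cbgs_pp_multihead.yaml', cfg)

head_cfgs = cfg.MODEL.DENSE_HEAD.RPN_HEAD_CFGS
print('number of heads:', len(head_cfgs))    # 6 heads covering 10 classes
for i, head in enumerate(head_cfgs):
    print(i, head['HEAD_CLS_NAME'])          # e.g. ['truck', 'construction_vehicle']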
CLASS_NAMES: ['car','truck', 'construction_vehicle', 'bus', 'trailer',
              'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone']

DATA_CONFIG:
    _BASE_CONFIG_: cfgs/dataset_configs/nuscenes_dataset.yaml

MODEL:
    NAME: SECONDNet

    VFE:
        NAME: MeanVFE

    BACKBONE_3D:
        NAME: VoxelResBackBone8x

    MAP_TO_BEV:
        NAME: HeightCompression
        NUM_BEV_FEATURES: 256

    BACKBONE_2D:
        NAME: BaseBEVBackbone
        LAYER_NUMS: [5, 5]
        LAYER_STRIDES: [1, 2]
        NUM_FILTERS: [128, 256]
        UPSAMPLE_STRIDES: [1, 2]
        NUM_UPSAMPLE_FILTERS: [256, 256]

    DENSE_HEAD:
        NAME: AnchorHeadMulti
        CLASS_AGNOSTIC: False

        DIR_OFFSET: 0.78539
        DIR_LIMIT_OFFSET: 0.0
        NUM_DIR_BINS: 2

        USE_MULTIHEAD: True
        SEPARATE_MULTIHEAD: True
        ANCHOR_GENERATOR_CONFIG: [
            {
                'class_name': car,
                'anchor_sizes': [[4.63, 1.97, 1.74]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.95],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.6,
                'unmatched_threshold': 0.45
            },
            {
                'class_name': truck,
                'anchor_sizes': [[6.93, 2.51, 2.84]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.6],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.55,
                'unmatched_threshold': 0.4
            },
            {
                'class_name': construction_vehicle,
                'anchor_sizes': [[6.37, 2.85, 3.19]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.225],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.5,
                'unmatched_threshold': 0.35
            },
            {
                'class_name': bus,
                'anchor_sizes': [[10.5, 2.94, 3.47]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.085],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.55,
                'unmatched_threshold': 0.4
            },
            {
                'class_name': trailer,
                'anchor_sizes': [[12.29, 2.90, 3.87]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [0.115],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.5,
                'unmatched_threshold': 0.35
            },
            {
                'class_name': barrier,
                'anchor_sizes': [[0.50, 2.53, 0.98]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-1.33],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.55,
                'unmatched_threshold': 0.4
            },
            {
                'class_name': motorcycle,
                'anchor_sizes': [[2.11, 0.77, 1.47]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-1.085],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.5,
                'unmatched_threshold': 0.3
            },
            {
                'class_name': bicycle,
                'anchor_sizes': [[1.70, 0.60, 1.28]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-1.18],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.5,
                'unmatched_threshold': 0.35
            },
            {
                'class_name': pedestrian,
                'anchor_sizes': [[0.73, 0.67, 1.77]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-0.935],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.6,
                'unmatched_threshold': 0.4
            },
            {
                'class_name': traffic_cone,
                'anchor_sizes': [[0.41, 0.41, 1.07]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-1.285],
                'align_center': False,
                'feature_map_stride': 8,
                'matched_threshold': 0.6,
                'unmatched_threshold': 0.4
            },
        ]

        SHARED_CONV_NUM_FILTER: 64

        RPN_HEAD_CFGS: [
            {
                'HEAD_CLS_NAME': ['car'],
            },
            {
                'HEAD_CLS_NAME': ['truck', 'construction_vehicle'],
            },
            {
                'HEAD_CLS_NAME': ['bus', 'trailer'],
            },
            {
                'HEAD_CLS_NAME': ['barrier'],
            },
            {
                'HEAD_CLS_NAME': ['motorcycle', 'bicycle'],
            },
            {
                'HEAD_CLS_NAME': ['pedestrian', 'traffic_cone'],
            },
        ]

        SEPARATE_REG_CONFIG:
            NUM_MIDDLE_CONV: 1
            NUM_MIDDLE_FILTER: 64
            REG_LIST: ['reg:2', 'height:1', 'size:3', 'angle:2', 'velo:2']

        TARGET_ASSIGNER_CONFIG:
            NAME: AxisAlignedTargetAssigner
            POS_FRACTION: -1.0
            SAMPLE_SIZE: 512
            NORM_BY_NUM_EXAMPLES: False
            MATCH_HEIGHT: False
            BOX_CODER: ResidualCoder
            BOX_CODER_CONFIG: {
                'code_size': 9,
                'encode_angle_by_sincos': True
            }

        LOSS_CONFIG:
            REG_LOSS_TYPE: WeightedL1Loss
            LOSS_WEIGHTS: {
                'pos_cls_weight': 1.0,
                'neg_cls_weight': 2.0,
                'cls_weight': 1.0,
                'loc_weight': 0.25,
                'dir_weight': 0.2,
                'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2]
            }

    POST_PROCESSING:
        RECALL_THRESH_LIST: [0.3, 0.5, 0.7]
        SCORE_THRESH: 0.1
        OUTPUT_RAW_SCORE: False

        EVAL_METRIC: kitti

        NMS_CONFIG:
            MULTI_CLASSES_NMS: True
            NMS_TYPE: nms_gpu
            NMS_THRESH: 0.2
            NMS_PRE_MAXSIZE: 1000
            NMS_POST_MAXSIZE: 83

OPTIMIZATION:
    BATCH_SIZE_PER_GPU: 4
    NUM_EPOCHS: 20

    OPTIMIZER: adam_onecycle
    LR: 0.003
    WEIGHT_DECAY: 0.01
    MOMENTUM: 0.9

    MOMS: [0.95, 0.85]
    PCT_START: 0.4
    DIV_FACTOR: 10
    DECAY_STEP_LIST: [35, 45]
    LR_DECAY: 0.1
    LR_CLIP: 0.0000001

    LR_WARMUP: False
    WARMUP_EPOCH: 1

    GRAD_NORM_CLIP: 10
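One cross-check that applies to both configs: ResidualCoder with code_size 9 plus encode_angle_by_sincos turns the single heading channel into sin/cos, so each head regresses 10 channels, which is exactly what REG_LIST and the 10 code_weights assume. A quick plain-Python check:

reg_list = ['reg:2', 'height:1', 'size:3', 'angle:2', 'velo:2']
code_weights = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2]

# code_size 9 = (x, y, z, dx, dy, dz, heading, vx, vy); sin/cos doubles the heading channel.
num_reg_channels = sum(int(item.split(':')[1]) for item in reg_list)
assert num_reg_channels == len(code_weights) == 10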
-import torch
 import argparse
 import glob
-import numpy as np
 from pathlib import Path
+import mayavi.mlab as mlab
+import numpy as np
+import torch
+from pcdet.config import cfg, cfg_from_yaml_file
 from pcdet.datasets import DatasetTemplate
 from pcdet.models import build_network, load_data_to_gpu
-from pcdet.config import cfg, cfg_from_yaml_file
 from pcdet.utils import common_utils
 from visual_utils import visualize_utils as V
-import mayavi.mlab as mlab


 class DemoDataset(DatasetTemplate):
@@ -98,4 +100,4 @@ def main():

 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
-import tqdm
-import time
 import pickle
+import time
 import numpy as np
 import torch
-from pcdet.utils import common_utils
+import tqdm
 from pcdet.models import load_data_to_gpu
+from pcdet.utils import common_utils


 def statistics_info(cfg, ret_dict, metric, disp_dict):
@@ -20,11 +22,9 @@ def statistics_info(cfg, ret_dict, metric, disp_dict):

 def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
     result_dir.mkdir(parents=True, exist_ok=True)

+    final_output_dir = result_dir / 'final_result' / 'data'
     if save_to_file:
-        final_output_dir = result_dir / 'final_result' / 'data'
         final_output_dir.mkdir(parents=True, exist_ok=True)
-    else:
-        final_output_dir = None

     metric = {
         'gt_num': 0,
@@ -109,7 +109,8 @@ def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, sa
     result_str, result_dict = dataset.evaluation(
         det_annos, class_names,
-        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC
+        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
+        output_path=final_output_dir
     )

     logger.info(result_str)
...
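eval_one_epoch now always builds final_output_dir and forwards it as output_path, so a dataset's evaluation hook can decide on its own whether to dump per-frame results. A minimal sketch of such a hook follows; it is illustrative only and assumes nothing about the real KITTI/NuScenes evaluation implementations.

import pickle
from pathlib import Path

def evaluation_sketch(det_annos, class_names, eval_metric='kitti', output_path=None):
    """Illustrative stand-in for a dataset's evaluation() method."""
    if output_path is not None:
        output_path = Path(output_path)
        output_path.mkdir(parents=True, exist_ok=True)
        with open(output_path / 'det_annos.pkl', 'wb') as f:
            pickle.dump(det_annos, f)  # keep raw detections next to the metrics
    return 'metrics skipped in this sketch', {}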
...
@@ -9,6 +9,8 @@ PY_ARGS=${@:3}
 JOB_NAME=eval
 SRUN_ARGS=${SRUN_ARGS:-""}
+PORT=$(( ( RANDOM % 10000 ) + 10000 ))

 srun -p ${PARTITION} \
     --job-name=${JOB_NAME} \
     --gres=gpu:${GPUS_PER_NODE} \
@@ -16,5 +18,5 @@ srun -p ${PARTITION} \
     --ntasks-per-node=${GPUS_PER_NODE} \
     --kill-on-bad-exit=1 \
     ${SRUN_ARGS} \
-    python -u test.py --launcher slurm ${PY_ARGS}
+    python -u test.py --launcher slurm ${PY_ARGS} --tcp_port $PORT
...
@@ -11,6 +11,7 @@ GPUS_PER_NODE=${GPUS_PER_NODE:-8}
 CPUS_PER_TASK=${CPUS_PER_TASK:-5}
 SRUN_ARGS=${SRUN_ARGS:-""}
+PORT=$(( ( RANDOM % 10000 ) + 10000 ))

 srun -p ${PARTITION} \
     --job-name=${JOB_NAME} \
@@ -20,4 +21,4 @@ srun -p ${PARTITION} \
     --cpus-per-task=${CPUS_PER_TASK} \
     --kill-on-bad-exit=1 \
     ${SRUN_ARGS} \
-    python -u train.py --launcher slurm ${PY_ARGS}
+    python -u train.py --launcher slurm ${PY_ARGS} --tcp_port $PORT
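Both SLURM scripts now draw a port in [10000, 19999] and pass it as --tcp_port, so two jobs landing on the same node do not collide on a single rendezvous port. The snippet below is only a guess at the mechanism behind that flag, not the actual common_utils.init_dist_* code:

import os
import torch.distributed as dist

def init_dist_sketch(tcp_port, local_rank, backend='nccl', world_size=1):
    # Illustrative: the per-job random port becomes this job's rendezvous port.
    os.environ.setdefault('MASTER_ADDR', 'localhost')
    os.environ['MASTER_PORT'] = str(tcp_port)
    dist.init_process_group(backend=backend, rank=local_rank, world_size=world_size)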
-import os
-import torch
-from tensorboardX import SummaryWriter
-import time
+import argparse
+import datetime
 import glob
+import os
 import re
-import datetime
+import time
-import argparse
 from pathlib import Path
-import torch.distributed as dist
+import numpy as np
+import torch
+from tensorboardX import SummaryWriter
+from eval_utils import eval_utils
+from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
 from pcdet.datasets import build_dataloader
 from pcdet.models import build_network
 from pcdet.utils import common_utils
-from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
-from eval_utils import eval_utils


 def parse_config():
     parser = argparse.ArgumentParser(description='arg parser')
     parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
-    parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')
-    parser.add_argument('--epochs', type=int, default=80, required=False, help='Number of epochs to train for')
+    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
     parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
     parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
     parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
-    parser.add_argument('--mgpus', action='store_true', default=False, help='whether to use multiple gpu')
     parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
     parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
     parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
@@ -43,6 +43,9 @@ def parse_config():
     cfg_from_yaml_file(args.cfg_file, cfg)
     cfg.TAG = Path(args.cfg_file).stem
     cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'

+    np.random.seed(1024)
+
     if args.set_cfgs is not None:
         cfg_from_list(args.set_cfgs, cfg)
@@ -132,12 +135,19 @@ def main():
     args, cfg = parse_config()
     if args.launcher == 'none':
         dist_test = False
+        total_gpus = 1
     else:
-        args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
-            args.batch_size, args.tcp_port, args.local_rank, backend='nccl'
+        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
+            args.tcp_port, args.local_rank, backend='nccl'
         )
         dist_test = True

+    if args.batch_size is None:
+        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
+    else:
+        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
+        args.batch_size = args.batch_size // total_gpus
+
     output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
     output_dir.mkdir(parents=True, exist_ok=True)
@@ -163,7 +173,6 @@ def main():
     logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)

     if dist_test:
-        total_gpus = dist.get_world_size()
         logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
     for key, val in vars(args).items():
         logger.info('{:16} {}'.format(key, val))
@@ -188,4 +197,3 @@ def main():

 if __name__ == '__main__':
     main()
+import argparse
+import datetime
+import glob
 import os
+from pathlib import Path
+from test import repeat_eval_ckpt
 import torch
+import torch.distributed as dist
 import torch.nn as nn
 from tensorboardX import SummaryWriter
-from pcdet.config import cfg, log_config_to_file, cfg_from_list, cfg_from_yaml_file
-from pcdet.utils import common_utils
+from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
 from pcdet.datasets import build_dataloader
 from pcdet.models import build_network, model_fn_decorator
+from pcdet.utils import common_utils
 from train_utils.optimization import build_optimizer, build_scheduler
 from train_utils.train_utils import train_model
-import torch.distributed as dist
-from test import repeat_eval_ckpt
-from pathlib import Path
-import argparse
-import datetime
-import glob


 def parse_config():
     parser = argparse.ArgumentParser(description='arg parser')
     parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
-    parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')
-    parser.add_argument('--epochs', type=int, default=30, required=False, help='number of epochs to train for')
+    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
+    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
     parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
     parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
     parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
@@ -57,11 +59,21 @@ def main():
     args, cfg = parse_config()
     if args.launcher == 'none':
         dist_train = False
+        total_gpus = 1
     else:
-        args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
-            args.batch_size, args.tcp_port, args.local_rank, backend='nccl'
+        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
+            args.tcp_port, args.local_rank, backend='nccl'
        )
         dist_train = True

+    if args.batch_size is None:
+        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
+    else:
+        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
+        args.batch_size = args.batch_size // total_gpus
+
+    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
+
     if args.fix_random_seed:
         common_utils.set_random_seed(666)
@@ -79,7 +91,6 @@ def main():
     logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)

     if dist_train:
-        total_gpus = dist.get_world_size()
         logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
     for key, val in vars(args).items():
         logger.info('{:16} {}'.format(key, val))
...
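Since train.py and test.py now default --batch_size (and --epochs for training) to None, the values fall back to the config's OPTIMIZATION block; an explicit --batch_size is interpreted as the total batch and must split evenly across GPUs. A standalone restatement of that logic with a worked example:

def resolve_batch_and_epochs(cli_batch_size, cli_epochs, total_gpus, optimization_cfg):
    # None means "use the value baked into the YAML config".
    if cli_batch_size is None:
        batch_size = optimization_cfg['BATCH_SIZE_PER_GPU']
    else:
        assert cli_batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        batch_size = cli_batch_size // total_gpus
    epochs = optimization_cfg['NUM_EPOCHS'] if cli_epochs is None else cli_epochs
    return batch_size, epochs

# e.g. 8 GPUs with --batch_size 32 -> 4 samples per GPU, 20 epochs taken from the config
print(resolve_batch_and_epochs(32, None, 8, {'BATCH_SIZE_PER_GPU': 4, 'NUM_EPOCHS': 20}))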
+from functools import partial
 import torch.nn as nn
 import torch.optim as optim
 import torch.optim.lr_scheduler as lr_sched
-from functools import partial
 from .fastai_optim import OptimWrapper
-from .learning_schedules_fastai import OneCycle, CosineWarmupLR
+from .learning_schedules_fastai import CosineWarmupLR, OneCycle


 def build_optimizer(model, optim_cfg):
...
...@@ -4,8 +4,8 @@ from collections import Iterable ...@@ -4,8 +4,8 @@ from collections import Iterable
import torch import torch
from torch import nn from torch import nn
from torch.nn.utils import parameters_to_vector
from torch._utils import _unflatten_dense_tensors from torch._utils import _unflatten_dense_tensors
from torch.nn.utils import parameters_to_vector
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm) bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
......
 # This file is modified from https://github.com/traveller59/second.pytorch
-import numpy as np
 import math
 from functools import partial
+import numpy as np
 import torch.optim.lr_scheduler as lr_sched
 from .fastai_optim import OptimWrapper
...
-import torch
-import os
 import glob
+import os
+import torch
 import tqdm
 from torch.nn.utils import clip_grad_norm_
...
-import numpy as np
 import mayavi.mlab as mlab
+import numpy as np
 import torch

 box_colormap = [
...