Commit 2c93d25a authored by Shaoshuai Shi's avatar Shaoshuai Shi
Browse files

fix some warnings, set default workers=4, depend on torchvision/kornia (CaDDN) softly

parent 3365a5e8
...@@ -6,7 +6,8 @@ try: ...@@ -6,7 +6,8 @@ try:
from kornia.geometry.linalg import transform_points from kornia.geometry.linalg import transform_points
except Exception as e: except Exception as e:
# Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions. # Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions.
print('Warning: kornia is not installed correctly, please ignore this warning if you do not use CaDDN. Otherwise, it is recommended to use torch version greater than 1.2 to use kornia properly.') # print('Warning: kornia is not installed correctly, please ignore this warning if you do not use CaDDN. Otherwise, it is recommended to use torch version greater than 1.2 to use kornia properly.')
pass
from pcdet.utils import transform_utils from pcdet.utils import transform_utils
...@@ -22,6 +23,14 @@ class FrustumGridGenerator(nn.Module): ...@@ -22,6 +23,14 @@ class FrustumGridGenerator(nn.Module):
disc_cfg: EasyDict, Depth discretization configuration disc_cfg: EasyDict, Depth discretization configuration
""" """
super().__init__() super().__init__()
try:
import kornia
except Exception as e:
# Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions.
print('Error: kornia is not installed correctly, please ignore this warning if you do not use CaDDN. '
'Otherwise, it is recommended to use torch version greater than 1.2 to use kornia properly.')
exit(-1)
self.dtype = torch.float32 self.dtype = torch.float32
self.grid_size = torch.as_tensor(grid_size, dtype=self.dtype) self.grid_size = torch.as_tensor(grid_size, dtype=self.dtype)
self.pc_range = pc_range self.pc_range = pc_range
......
import torchvision
from .ddn_template import DDNTemplate from .ddn_template import DDNTemplate
try:
import torchvision
except:
pass
class DDNDeepLabV3(DDNTemplate): class DDNDeepLabV3(DDNTemplate):
......
...@@ -2,11 +2,9 @@ from collections import OrderedDict ...@@ -2,11 +2,9 @@ from collections import OrderedDict
from pathlib import Path from pathlib import Path
from torch import hub from torch import hub
import numpy as np
import torch import torch
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
import torchvision
try: try:
from kornia.enhance.normalize import normalize from kornia.enhance.normalize import normalize
......
...@@ -24,7 +24,7 @@ def parse_config(): ...@@ -24,7 +24,7 @@ def parse_config():
parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training') parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for') parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader') parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment') parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from') parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model') parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
...@@ -170,8 +170,6 @@ def main(): ...@@ -170,8 +170,6 @@ def main():
merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
) )
import pdb
pdb.set_trace()
if hasattr(train_set, 'use_shared_memory') and train_set.use_shared_memory: if hasattr(train_set, 'use_shared_memory') and train_set.use_shared_memory:
train_set.clean_shared_memory() train_set.clean_shared_memory()
......
# This file is modified from https://github.com/traveller59/second.pytorch # This file is modified from https://github.com/traveller59/second.pytorch
from collections import Iterable try:
from collections.abc import Iterable
except:
from collections import Iterable
import torch import torch
from torch import nn from torch import nn
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment