OpenDAS / MMCV · Commit 0fb07d0e (unverified)
Authored Mar 03, 2023 by liuhw, committed by GitHub on Mar 03, 2023
Parent: 998e4597

[Feature] Add the support of voxelization op for ascend device (#2614)

* Add voxelization op npu adaptor
* update
* update
Showing 4 changed files with 97 additions and 2 deletions:
  docs/en/understand_mmcv/ops.md                    +1 -1
  docs/zh_cn/understand_mmcv/ops.md                 +1 -1
  mmcv/ops/csrc/pytorch/npu/voxelization_npu.cpp    +59 -0
  tests/test_ops/test_voxelization.py               +36 -0
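In short, the commit registers an NPU backend for the hard voxelization op and marks Ascend support for Voxelization in both ops tables. Below is a minimal usage sketch of the new device path, mirroring the test added in this commit; it assumes an Ascend build of MMCV with torch_npu installed, and the random input tensor is illustrative rather than taken from the commit.

import torch
from mmcv.ops import Voxelization

# Parameters below mirror the new test_voxelization_npu test added in this commit.
voxel_size = [0.5, 0.5, 0.5]
point_cloud_range = [0, -40, -3, 70.4, 40, 1]
max_num_points = 1000
hard_voxelization = Voxelization(voxel_size, point_cloud_range, max_num_points)

# Illustrative input: random points that fall inside point_cloud_range
# (the real test loads reference data from tests/data/for_3d_ops/test_voxel.npy).
points = torch.rand(1000, 4).contiguous().to('npu')  # assumes torch_npu is set up

# Output unpacking mirrors the new test shown further below.
coors, voxels, num_points_per_voxel = hard_voxelization.forward(points)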
docs/en/understand_mmcv/ops.md
@@ -58,6 +58,6 @@ We implement common ops used in detection, segmentation, etc.
| ThreeNN | | √ | √ | | |
| TINShift | | √ | √ | | |
| UpFirDn2d | | √ | | | |
-| Voxelization | √ | √ | | | |
+| Voxelization | √ | √ | | | √ |
| PrRoIPool | | √ | | | |
| BezierAlign | √ | √ | | | |
docs/zh_cn/understand_mmcv/ops.md
@@ -58,6 +58,6 @@ MMCV provides operators commonly used in tasks such as detection and segmentation
| ThreeNN | | √ | √ | | |
| TINShift | | √ | √ | | |
| UpFirDn2d | | √ | | | |
-| Voxelization | √ | √ | | | |
+| Voxelization | √ | √ | | | √ |
| PrRoIPool | | √ | | | |
| BezierAlign | √ | √ | | | |
mmcv/ops/csrc/pytorch/npu/voxelization_npu.cpp
new file (mode 100644)
#include "pytorch_npu_helper.hpp"

using namespace NPU_NAME_SPACE;
using namespace std;

int hard_voxelize_forward_impl(const at::Tensor &points, at::Tensor &voxels,
                               at::Tensor &coors,
                               at::Tensor &num_points_per_voxel,
                               const std::vector<float> voxel_size,
                               const std::vector<float> coors_range,
                               const int max_points, const int max_voxels,
                               const int NDim = 3);

int hard_voxelize_forward_npu(const at::Tensor &points, at::Tensor &voxels,
                              at::Tensor &coors,
                              at::Tensor &num_points_per_voxel,
                              const std::vector<float> voxel_size,
                              const std::vector<float> coors_range,
                              const int max_points, const int max_voxels,
                              const int NDim = 3) {
  at::Tensor voxel_num_tmp = OpPreparation::ApplyTensor(points, {1});
  at::Tensor voxel_num = at_npu::native::NPUNativeFunctions::npu_dtype_cast(
      voxel_num_tmp, at::kInt);

  at::Tensor voxel_size_cpu = at::from_blob(
      const_cast<float *>(voxel_size.data()), {3}, dtype(at::kFloat));
  at::Tensor voxel_size_npu =
      CalcuOpUtil::CopyTensorHostToDevice(voxel_size_cpu);

  at::Tensor coors_range_cpu = at::from_blob(
      const_cast<float *>(coors_range.data()), {6}, dtype(at::kFloat));
  at::Tensor coors_range_npu =
      CalcuOpUtil::CopyTensorHostToDevice(coors_range_cpu);

  int64_t max_points_ = (int64_t)max_points;
  int64_t max_voxels_ = (int64_t)max_voxels;
  // only support true now
  bool deterministic = true;

  OpCommand cmd;
  cmd.Name("Voxelization")
      .Input(points)
      .Input(voxel_size_npu)
      .Input(coors_range_npu)
      .Output(voxels)
      .Output(coors)
      .Output(num_points_per_voxel)
      .Output(voxel_num)
      .Attr("max_points", max_points_)
      .Attr("max_voxels", max_voxels_)
      .Attr("deterministic", deterministic)
      .Run();

  auto voxel_num_cpu = voxel_num.to(at::kCPU);
  int voxel_num_int = voxel_num_cpu.data_ptr<int>()[0];
  return voxel_num_int;
}

REGISTER_NPU_IMPL(hard_voxelize_forward_impl, hard_voxelize_forward_npu);
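The function above fills the preallocated voxels, coors, and num_points_per_voxel tensors on the device and returns the value of the kernel's voxel_num output as an int, i.e. how many voxels were actually produced. As a hypothetical illustration (not part of this commit), a caller could use that count to trim the unused rows of the preallocated buffers, roughly like this; the buffer sizes and the returned count are stand-in values:

import torch

# Hypothetical buffer sizes; in MMCV the wrapper derives these from
# max_voxels / max_points, here they are hard-coded for illustration.
max_voxels, max_points, num_features = 20000, 35, 4
voxels = torch.zeros(max_voxels, max_points, num_features)
coors = torch.zeros(max_voxels, 3, dtype=torch.int32)
num_points_per_voxel = torch.zeros(max_voxels, dtype=torch.int32)

voxel_num = 42  # stand-in for the int returned by hard_voxelize_forward_npu

# Keep only the rows the kernel actually produced.
voxels = voxels[:voxel_num]
coors = coors[:voxel_num]
num_points_per_voxel = num_points_per_voxel[:voxel_num]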
tests/test_ops/test_voxelization.py
@@ -4,6 +4,7 @@ import pytest
import torch

from mmcv.ops import Voxelization
from mmcv.utils import IS_NPU_AVAILABLE


def _get_voxel_points_indices(points, coors, voxel):
@@ -137,3 +138,38 @@ def test_voxelization_nondeterministic():
    coors_all_set = {tuple(c) for c in coors_all}

    assert len(coors_set) == len(coors) == len(coors_all_set)


@pytest.mark.parametrize('device_type', [
    pytest.param(
        'npu',
        marks=pytest.mark.skipif(
            not IS_NPU_AVAILABLE, reason='requires NPU support'))
])
def test_voxelization_npu(device_type):
    voxel_size = [0.5, 0.5, 0.5]
    point_cloud_range = [0, -40, -3, 70.4, 40, 1]
    voxel_dict = np.load(
        'tests/data/for_3d_ops/test_voxel.npy', allow_pickle=True).item()
    expected_coors = voxel_dict['coors']
    expected_voxels = voxel_dict['voxels']
    expected_num_points_per_voxel = voxel_dict['num_points_per_voxel']
    points = voxel_dict['points']

    points = torch.tensor(points)
    max_num_points = 1000
    hard_voxelization = Voxelization(voxel_size, point_cloud_range,
                                     max_num_points)

    device = torch.device(device_type)

    # test hard_voxelization on npu
    points = points.contiguous().to(device)
    coors, voxels, num_points_per_voxel = hard_voxelization.forward(points)
    coors = coors.cpu().detach().numpy()
    voxels = voxels.cpu().detach().numpy()
    num_points_per_voxel = num_points_per_voxel.cpu().detach().numpy()
    assert np.all(coors == expected_coors)
    assert np.all(voxels == expected_voxels)
    assert np.all(num_points_per_voxel == expected_num_points_per_voxel)