OpenDAS / nerfacc / Commits / 62afb4ba

Commit 62afb4ba, authored Nov 06, 2022 by Ruilong Li

Revert "unpack_info with n_samples"

This reverts commit 301b4dfa.

Parent: 301b4dfa
Showing 7 changed files with 12 additions and 14 deletions (+12 -14):

    nerfacc/cuda/csrc/pack.cu     +2 -3
    nerfacc/cuda/csrc/pybind.cu   +1 -1
    nerfacc/pack.py               +3 -4
    nerfacc/ray_marching.py       +2 -2
    nerfacc/vol_rendering.py      +2 -2
    tests/test_pack.py            +1 -1
    tests/test_ray_marching.py    +1 -1
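The caller-facing effect of the revert is that unpack_info no longer takes the total sample count; it is inferred from packed_info. A minimal usage sketch (illustrative, not part of the commit), assuming a CUDA device and parameters in the spirit of the docstring example in nerfacc/pack.py:

import torch
from nerfacc import ray_marching, unpack_info

device = "cuda:0"
rays_o = torch.rand((128, 3), device=device)
rays_d = torch.randn((128, 3), device=device)
rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)

# Ray marching with near/far planes only; parameter values are illustrative.
packed_info, t_starts, t_ends = ray_marching(
    rays_o, rays_d, near_plane=0.1, far_plane=1.0, render_step_size=1e-3
)

# Before this commit (301b4dfa): unpack_info(packed_info, t_starts.shape[0])
# After this commit (62afb4ba): the sample count is inferred from packed_info.
ray_indices = unpack_info(packed_info)  # LongTensor, shape (n_samples,)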
nerfacc/cuda/csrc/pack.cu
@@ -81,8 +81,7 @@ __global__ void unpack_data_kernel(
         return;
 }
 
-torch::Tensor unpack_info(
-    const torch::Tensor packed_info, const int n_samples)
+torch::Tensor unpack_info(const torch::Tensor packed_info)
 {
     DEVICE_GUARD(packed_info);
     CHECK_INPUT(packed_info);
@@ -91,7 +90,7 @@ torch::Tensor unpack_info(
     const int threads = 256;
     const int blocks = CUDA_N_BLOCKS_NEEDED(n_rays, threads);
 
-    // int n_samples = packed_info[n_rays - 1].sum(0).item<int>();
+    int n_samples = packed_info[n_rays - 1].sum(0).item<int>();
     torch::Tensor ray_indices = torch::empty(
         {n_samples}, packed_info.options().dtype(torch::kInt32));
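The uncommented line is why the extra argument is unnecessary: with samples packed contiguously per ray, the last row of packed_info (start index, sample count) already determines the total. A quick PyTorch sketch of the same arithmetic, with illustrative values:

import torch

# Illustrative packed_info: per-ray (start, count) rows for four rays.
packed_info = torch.tensor([[0, 2], [2, 3], [5, 0], [5, 1]], dtype=torch.int32)

# Same arithmetic as packed_info[n_rays - 1].sum(0).item<int>() above:
# the last ray's start index plus its sample count is the total sample count.
n_samples = packed_info[-1].sum().item()
print(n_samples)  # 6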
nerfacc/cuda/csrc/pybind.cu
@@ -45,7 +45,7 @@ std::vector<torch::Tensor> ray_marching(
     const float cone_angle);
 
 torch::Tensor unpack_info(
-    const torch::Tensor packed_info, const int n_samples);
+    const torch::Tensor packed_info);
 torch::Tensor unpack_info_to_mask(
     const torch::Tensor packed_info, const int n_samples);
nerfacc/pack.py
@@ -44,7 +44,7 @@ def pack_data(data: Tensor, mask: Tensor) -> Tuple[Tensor, Tensor]:
 
 @torch.no_grad()
-def unpack_info(packed_info: Tensor, n_samples: int) -> Tensor:
+def unpack_info(packed_info: Tensor) -> Tensor:
     """Unpack `packed_info` to `ray_indices`. Useful for converting per ray data to per sample data.
 
     Note:
@@ -53,7 +53,6 @@ def unpack_info(packed_info: Tensor, n_samples: int) -> Tensor:
     Args:
         packed_info: Stores information on which samples belong to the same ray. \
             See :func:`nerfacc.ray_marching` for details. Tensor with shape (n_rays, 2).
-        n_samples: Total number of samples.
 
     Returns:
         Ray index of each sample. LongTensor with shape (n_sample).
@@ -72,7 +71,7 @@ def unpack_info(packed_info: Tensor, n_samples: int) -> Tensor:
         # torch.Size([128, 2]) torch.Size([115200, 1]) torch.Size([115200, 1])
         print(packed_info.shape, t_starts.shape, t_ends.shape)
         # Unpack per-ray info to per-sample info.
-        ray_indices = unpack_info(packed_info, t_starts.shape[0])
+        ray_indices = unpack_info(packed_info)
         # torch.Size([115200]) torch.int64
         print(ray_indices.shape, ray_indices.dtype)
@@ -81,7 +80,7 @@ def unpack_info(packed_info: Tensor, n_samples: int) -> Tensor:
         packed_info.dim() == 2 and packed_info.shape[-1] == 2
     ), "packed_info must be a 2D tensor with shape (n_rays, 2)."
     if packed_info.is_cuda:
-        ray_indices = _C.unpack_info(packed_info.contiguous().int(), n_samples)
+        ray_indices = _C.unpack_info(packed_info.contiguous().int())
     else:
         raise NotImplementedError("Only support cuda inputs.")
     return ray_indices.long()
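For intuition, the expansion the CUDA kernel performs can be sketched in pure PyTorch with torch.repeat_interleave. This is an illustrative reference, not the library's implementation (which, as shown above, only supports CUDA inputs):

import torch
from torch import Tensor

def unpack_info_reference(packed_info: Tensor) -> Tensor:
    # Repeat each ray id by that ray's sample count (the second column).
    n_rays = packed_info.shape[0]
    counts = packed_info[:, 1].long()
    ray_ids = torch.arange(n_rays, device=packed_info.device)
    return torch.repeat_interleave(ray_ids, counts)  # shape (n_samples,)

packed_info = torch.tensor([[0, 1], [1, 0], [1, 4]])
print(unpack_info_reference(packed_info))  # tensor([0, 2, 2, 2, 2])

The example values mirror the ray_indices_tgt fixture in tests/test_pack.py further down.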
nerfacc/ray_marching.py
@@ -128,7 +128,7 @@ def ray_marching(
         )
 
         # Convert t_starts and t_ends to sample locations.
-        ray_indices = unpack_info(packed_info, t_starts.shape[0])
+        ray_indices = unpack_info(packed_info)
         t_mid = (t_starts + t_ends) / 2.0
         sample_locs = rays_o[ray_indices] + t_mid * rays_d[ray_indices]
@@ -197,7 +197,7 @@ def ray_marching(
     # skip invisible space
     if sigma_fn is not None or alpha_fn is not None:
         # Query sigma without gradients
-        ray_indices = unpack_info(packed_info, t_starts.shape[0])
+        ray_indices = unpack_info(packed_info)
         if sigma_fn is not None:
             sigmas = sigma_fn(t_starts, t_ends, ray_indices.long())
         assert (
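The sigma_fn call above pins down the callback contract: it receives the per-sample segment bounds and the ray index of each sample, and returns one density per sample. A minimal illustrative callback, with random rays and a dummy density standing in for a real radiance field:

import torch

rays_o = torch.rand(128, 3)
rays_d = torch.randn(128, 3)
rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)

def sigma_fn(t_starts, t_ends, ray_indices):
    # Evaluate density at segment midpoints; a real implementation would
    # query a radiance field at these positions instead.
    t_mid = (t_starts + t_ends) / 2.0
    positions = rays_o[ray_indices] + rays_d[ray_indices] * t_mid  # (n_samples, 3)
    return positions.norm(dim=-1, keepdim=True)  # dummy densities, (n_samples, 1)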
nerfacc/vol_rendering.py
@@ -96,7 +96,7 @@ def rendering(
     )
     n_rays = packed_info.shape[0]
-    ray_indices = unpack_info(packed_info, t_starts.shape[0])
+    ray_indices = unpack_info(packed_info)
 
     # Query sigma/alpha and color with gradients
     if rgb_sigma_fn is not None:
@@ -160,7 +160,7 @@ def accumulate_along_rays(
         weights: Volumetric rendering weights for those samples. Tensor with shape \
             (n_samples,).
         ray_indices: Ray index of each sample. IntTensor with shape (n_samples). \
-            It can be obtained from `unpack_info(packed_info, n_samples)`.
+            It can be obtained from `unpack_info(packed_info)`.
         values: The values to be accmulated. Tensor with shape (n_samples, D). If \
             None, the accumulated values are just weights. Default is None.
         n_rays: Total number of rays. This will decide the shape of the ouputs. If \
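accumulate_along_rays then reduces these per-sample quantities back to one value per ray via ray_indices. Conceptually this is a scatter-add over ray ids, sketched below (illustrative, not the library's kernel):

import torch

n_rays, n_samples, D = 3, 5, 1
weights = torch.rand(n_samples)              # per-sample rendering weights
values = torch.rand(n_samples, D)            # per-sample values (e.g. colors)
ray_indices = torch.tensor([0, 2, 2, 2, 2])  # e.g. from unpack_info(packed_info)

# Scatter-add each weighted sample into the output slot of its ray.
outputs = torch.zeros(n_rays, D)
outputs.index_add_(0, ray_indices, weights[:, None] * values)
print(outputs.shape)  # torch.Size([3, 1])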
tests/test_pack.py
@@ -31,7 +31,7 @@ def test_unpack_info():
     ray_indices_tgt = torch.tensor(
         [0, 2, 2, 2, 2], dtype=torch.int64, device=device
     )
-    ray_indices = unpack_info(packed_info, 5)
+    ray_indices = unpack_info(packed_info)
     assert torch.allclose(ray_indices, ray_indices_tgt)
tests/test_ray_marching.py
@@ -39,7 +39,7 @@ def test_marching_with_grid():
         far_plane=1.0,
         render_step_size=1e-2,
     )
-    ray_indices = unpack_info(packed_info, t_starts.shape[0]).long()
+    ray_indices = unpack_info(packed_info).long()
     samples = (
         rays_o[ray_indices] + rays_d[ray_indices] * (t_starts + t_ends) / 2.0
     )