OpenDAS / torch-harmonics · Commit b2ce5906

Authored Aug 29, 2024 by Thorsten Kurth, committed by GitHub on Aug 29, 2024.

Commit message:
adding routines for cleaning up distributed process groups (#50)
Parent: 24fcb06e

Showing 5 changed files with 22 additions and 4 deletions:

- Dockerfile (+3, -2)
- tests/test_distributed_convolution.py (+6, -1)
- tests/test_distributed_sht.py (+5, -0)
- torch_harmonics/distributed/__init__.py (+1, -1)
- torch_harmonics/distributed/utils.py (+7, -0)
Dockerfile
```diff
@@ -30,7 +30,7 @@
 # build after cloning in directory torch_harmonics via
 # docker build . -t torch_harmonics
 
-FROM nvcr.io/nvidia/pytorch:24.07-py3
+FROM nvcr.io/nvidia/pytorch:24.08-py3
 
 COPY . /workspace/torch_harmonics
@@ -38,6 +38,7 @@ COPY . /workspace/torch_harmonics
 RUN pip install parameterized
 
 # The custom CUDA extension does not support architectures < 7.0
+ENV FORCE_CUDA_EXTENSION=1
 ENV TORCH_CUDA_ARCH_LIST "7.0 7.2 7.5 8.0 8.6 8.7 9.0+PTX"
-RUN pip install --global-option --cuda_ext /workspace/torch_harmonics
+RUN cd /workspace/torch_harmonics && pip install --no-build-isolation .
```
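The install step no longer passes build flags through pip's deprecated `--global-option`; instead the `FORCE_CUDA_EXTENSION=1` environment variable tells the build whether to compile the CUDA extension, and `--no-build-isolation` lets the build see the PyTorch already present in the NGC image. A minimal sketch of how such an env-var gate is typically wired into a `setup.py` (module name and source path below are illustrative, not torch-harmonics' actual build script):

```python
# Hypothetical setup.py sketch: gate an optional CUDA extension on an
# environment variable instead of a pip --global-option flag.
import os
from setuptools import setup

ext_modules = []
cmdclass = {}
if os.environ.get("FORCE_CUDA_EXTENSION", "0") == "1":
    # Importing torch at build time requires pip install --no-build-isolation.
    from torch.utils.cpp_extension import BuildExtension, CUDAExtension

    ext_modules.append(
        CUDAExtension(
            name="disco_cuda",               # illustrative module name
            sources=["csrc/disco_cuda.cu"],  # illustrative source path
        )
    )
    cmdclass["build_ext"] = BuildExtension

setup(name="torch_harmonics", ext_modules=ext_modules, cmdclass=cmdclass)
```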
tests/test_distributed_convolution.py
```diff
@@ -112,6 +112,11 @@ class TestDistributedDiscreteContinuousConvolution(unittest.TestCase):
         # initializing sht
         thd.init(cls.h_group, cls.w_group)
 
+    @classmethod
+    def tearDownClass(cls):
+        thd.finalize()
+        dist.destroy_process_group(None)
+
     def _split_helper(self, tensor):
         with torch.no_grad():
             # split in W
@@ -185,7 +190,7 @@ class TestDistributedDiscreteContinuousConvolution(unittest.TestCase):
         [128, 256, 128, 256, 32, 8, [3], 2, "equiangular", "equiangular", False, 1e-5],
         [128, 256, 128, 256, 32, 6, [3], 1, "equiangular", "equiangular", False, 1e-5],
         [128, 256, 128, 256, 32, 8, [3], 1, "equiangular", "equiangular", True, 1e-5],
-        [129, 256, 128, 256, 32, 8, [3], 1, "equiangular", "equiangular", True, 1e-5],
+        [129, 256, 129, 256, 32, 8, [3], 1, "equiangular", "equiangular", True, 1e-5],
         [128, 256, 128, 256, 32, 8, [3, 2], 1, "equiangular", "equiangular", True, 1e-5],
         [64, 128, 128, 256, 32, 8, [3], 1, "equiangular", "equiangular", True, 1e-5],
         [128, 256, 128, 256, 32, 8, [3], 2, "equiangular", "equiangular", True, 1e-5],
```
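Both test files get the same teardown hook: `tearDownClass` mirrors the group creation in `setUpClass`, so each test class releases its process groups instead of leaking them into subsequent runs. A condensed sketch of the pairing (the group construction here is simplified; the real tests arrange ranks into a 2D polar-by-azimuth grid):

```python
# Condensed sketch of the setup/teardown pairing this commit introduces.
# Assumes a torchrun-style launch; putting every rank into both groups is a
# simplification for illustration only.
import unittest

import torch.distributed as dist
import torch_harmonics.distributed as thd

class DistributedTestBase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        dist.init_process_group(backend="nccl")
        world = list(range(dist.get_world_size()))
        cls.h_group = dist.new_group(ranks=world)
        cls.w_group = dist.new_group(ranks=world)
        thd.init(cls.h_group, cls.w_group)

    @classmethod
    def tearDownClass(cls):
        thd.finalize()                    # destroys the polar/azimuth groups
        dist.destroy_process_group(None)  # then deinitializes torch.distributed
```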
tests/test_distributed_sht.py
```diff
@@ -118,6 +118,11 @@ class TestDistributedSphericalHarmonicTransform(unittest.TestCase):
         # initializing sht
         thd.init(cls.h_group, cls.w_group)
 
+    @classmethod
+    def tearDownClass(cls):
+        thd.finalize()
+        dist.destroy_process_group(None)
+
     def _split_helper(self, tensor):
         with torch.no_grad():
```
torch_harmonics/distributed/__init__.py
```diff
@@ -30,7 +30,7 @@
 #
 # we need this in order to enable distributed
-from .utils import init, is_initialized, polar_group, azimuth_group
+from .utils import init, finalize, is_initialized, polar_group, azimuth_group
 from .utils import polar_group_size, azimuth_group_size, polar_group_rank, azimuth_group_rank
 from .primitives import compute_split_shapes, split_tensor_along_dim
 from .primitives import (
```
torch_harmonics/distributed/utils.py
```diff
@@ -51,6 +51,13 @@ def init(polar_process_group, azimuth_process_group):
     _AZIMUTH_PARALLEL_GROUP = azimuth_process_group
     _IS_INITIALIZED = True
 
+def finalize():
+    if is_initialized():
+        if is_distributed_polar():
+            dist.destroy_process_group(_POLAR_PARALLEL_GROUP)
+        if is_distributed_azimuth():
+            dist.destroy_process_group(_AZIMUTH_PARALLEL_GROUP)
+
 def is_initialized() -> bool:
     return _IS_INITIALIZED
```
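With `finalize` exported, a distributed driver script can now bracket its work symmetrically: initialize the grid of process groups, run, then release them. A minimal end-to-end lifecycle sketch (the 2x2 grid layout and torchrun-based rendezvous are assumptions for illustration):

```python
# Minimal lifecycle sketch for the new cleanup routine: init -> work -> finalize.
# Assumes 4 ranks launched via torchrun, arranged as a 2x2 (polar x azimuth) grid.
import torch.distributed as dist
import torch_harmonics.distributed as thd

def main():
    dist.init_process_group(backend="nccl")
    rank, world = dist.get_rank(), dist.get_world_size()
    assert world == 4, "illustrative 2x2 grid"

    # new_group is collective, so every rank creates every group.
    # Rows share a polar group, columns share an azimuth group.
    polar_groups = [dist.new_group(ranks=[0, 1]), dist.new_group(ranks=[2, 3])]
    azimuth_groups = [dist.new_group(ranks=[0, 2]), dist.new_group(ranks=[1, 3])]
    thd.init(polar_groups[rank // 2], azimuth_groups[rank % 2])

    # ... build distributed SHT / convolution modules and run them here ...

    thd.finalize()                    # new in this commit: frees both groups
    dist.destroy_process_group(None)  # then deinitialize torch.distributed

if __name__ == "__main__":
    main()
```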