Unverified Commit 4375c2d7 authored by Quan (Andy) Gan, committed by GitHub

[Doc] Fix documentation in dgl.multiprocessing namespace (#3929)

* fix docs

* remove

* oh

* fix
parent f5bba284
@@ -17,6 +17,5 @@ In addition, if your backend is PyTorch, this module will also be compatible wit
 .. autosummary::
     :toctree: ../../generated/
 
-    spawn
     call_once_and_share
     shared_tensor
@@ -9,7 +9,7 @@ from .. import backend as F
 if F.get_preferred_backend() == 'pytorch':
     # Wrap around torch.multiprocessing...
     from torch.multiprocessing import *
-    # ... and override the Process initializer and spawn function.
+    # ... and override the Process initializer.
     from .pytorch import *
 else:
     # Just import multiprocessing module.
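For context, a minimal sketch of the drop-in behavior this dispatch provides, assuming a PyTorch backend and that torch.multiprocessing's re-exported names (e.g. Queue) carry over unchanged:

import dgl.multiprocessing as dmp

# With a PyTorch backend, dgl.multiprocessing re-exports
# torch.multiprocessing's names, so the familiar Process/Queue
# API is available as a drop-in replacement.
q = dmp.Queue()
q.put('ok')
print(q.get())  # -> 'ok'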
@@ -44,10 +44,8 @@ def _get_shared_mem_name(id_):
     return "shared" + str(id_)
 
 def call_once_and_share(func, shape, dtype, rank=0):
-    """Invoke the function in a single process of the process group spawned by
-    :func:`spawn`, and share the result to other processes.
-
-    Requires the subprocesses to be spawned with :func:`dgl.multiprocessing.pytorch.spawn`.
+    """Invoke the function in a single process of the PyTorch distributed process group,
+    and share the result with other processes.
 
     Parameters
     ----------
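For context, a minimal sketch of the contract the reworded docstring describes: any initialized torch.distributed process group suffices, with no DGL-specific spawn required. The worker function, address, and feature shape below are hypothetical:

import torch
import torch.distributed as dist
import dgl.multiprocessing as dmp

def worker(rank, world_size):
    # Any initialized torch.distributed group satisfies the new contract.
    dist.init_process_group('gloo', init_method='tcp://127.0.0.1:29500',
                            rank=rank, world_size=world_size)

    def load_features():
        # Expensive one-off computation; runs only on the rank passed to
        # call_once_and_share (rank 0 here).
        return torch.randn(1000, 128)

    # Every rank receives the same shared-memory result.
    feats = dmp.call_once_and_share(load_features, (1000, 128),
                                    torch.float32, rank=0)
    print(rank, feats.shape)

if __name__ == '__main__':
    procs = [dmp.Process(target=worker, args=(r, 4)) for r in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()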
@@ -89,7 +87,7 @@ def call_once_and_share(func, shape, dtype, rank=0):
 def shared_tensor(shape, dtype=torch.float32):
     """Create a tensor in shared memory accessible by all processes within the same
-    ``torch.distsributed`` process group.
+    ``torch.distributed`` process group.
 
     The content is uninitialized.
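Likewise, a hedged sketch of shared_tensor under the corrected ``torch.distributed`` spelling; because the contents start uninitialized, one rank writes and a barrier publishes the data. It assumes a process group initialized as in the previous sketch:

import torch
import torch.distributed as dist
import dgl.multiprocessing as dmp

def use_shared_buffer():
    buf = dmp.shared_tensor((4, 4))  # dtype defaults to torch.float32
    if dist.get_rank() == 0:
        # Contents are uninitialized, so exactly one rank fills them in.
        buf[:] = torch.arange(16, dtype=torch.float32).view(4, 4)
    dist.barrier()  # make rank 0's writes visible before others read
    print(dist.get_rank(), buf.sum().item())  # identical on every rank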