"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "64270eff34381fd02b21cf3129e38b0da155c81a"
Unverified Commit 32f12ee1 authored by Xin Yao's avatar Xin Yao Committed by GitHub
Browse files

[Doc] Unify the minimal versions required for PyTorch/TensorFlow/MXNet (#4180)

parent 5bef48df
...@@ -4,7 +4,6 @@ dependencies: ...@@ -4,7 +4,6 @@ dependencies:
- pip - pip
- pip: - pip:
- tensorflow==2.3.0 - tensorflow==2.3.0
- tfdlpack
- pytest - pytest
- nose - nose
- numpy - numpy
......
...@@ -4,7 +4,6 @@ dependencies: ...@@ -4,7 +4,6 @@ dependencies:
- pip - pip
- pip: - pip:
- tensorflow==2.3.0 - tensorflow==2.3.0
- tfdlpack-gpu
- pytest - pytest
- nose - nose
- numpy - numpy
......
...@@ -174,13 +174,13 @@ PyTorch backend ...@@ -174,13 +174,13 @@ PyTorch backend
``````````````` ```````````````
Export ``DGLBACKEND`` as ``pytorch`` to specify PyTorch backend. The required PyTorch Export ``DGLBACKEND`` as ``pytorch`` to specify PyTorch backend. The required PyTorch
version is 1.5.0 or later. See `pytorch.org <https://pytorch.org>`_ for installation instructions. version is 1.9.0 or later. See `pytorch.org <https://pytorch.org>`_ for installation instructions.
MXNet backend MXNet backend
````````````` `````````````
Export ``DGLBACKEND`` as ``mxnet`` to specify MXNet backend. The required MXNet version is Export ``DGLBACKEND`` as ``mxnet`` to specify MXNet backend. The required MXNet version is
1.5 or later. See `mxnet.apache.org <https://mxnet.apache.org/get_started>`_ for installation 1.6 or later. See `mxnet.apache.org <https://mxnet.apache.org/get_started>`_ for installation
instructions. instructions.
MXNet uses uint32 as the default data type for integer tensors, which only supports graph of MXNet uses uint32 as the default data type for integer tensors, which only supports graph of
...@@ -196,10 +196,6 @@ Tensorflow backend ...@@ -196,10 +196,6 @@ Tensorflow backend
`````````````````` ``````````````````
Export ``DGLBACKEND`` as ``tensorflow`` to specify Tensorflow backend. The required Tensorflow Export ``DGLBACKEND`` as ``tensorflow`` to specify Tensorflow backend. The required Tensorflow
version is 2.2.0 or later. See `tensorflow.org <https://www.tensorflow.org/install>`_ for installation version is 2.3.0 or later. See `tensorflow.org <https://www.tensorflow.org/install>`_ for installation
instructions. In addition, DGL will set ``TF_FORCE_GPU_ALLOW_GROWTH`` to ``true`` to prevent Tensorflow from taking over the whole GPU memory: instructions. In addition, DGL will set ``TF_FORCE_GPU_ALLOW_GROWTH`` to ``true`` to prevent Tensorflow from taking over the whole GPU memory:
.. code:: bash
pip install "tensorflow>=2.2.0" # when using tensorflow cpu version
...@@ -12,9 +12,8 @@ from ... import ndarray as dglnd ...@@ -12,9 +12,8 @@ from ... import ndarray as dglnd
from ..._deprecate import kernel as K from ..._deprecate import kernel as K
from ...function.base import TargetCode from ...function.base import TargetCode
MX_VERSION = LooseVersion(mx.__version__) if LooseVersion(mx.__version__) < LooseVersion("1.6.0"):
if MX_VERSION.version[0] == 1 and MX_VERSION.version[1] < 5: raise RuntimeError("DGL requires MXNet >= 1.6")
raise RuntimeError("DGL requires mxnet >= 1.5")
# After MXNet 1.5, empty tensors aren't supported by default. # After MXNet 1.5, empty tensors aren't supported by default.
# After we turn on the numpy compatible flag, MXNet supports empty NDArray. # After we turn on the numpy compatible flag, MXNet supports empty NDArray.
...@@ -214,19 +213,9 @@ def split(x, sizes_or_sections, dim): ...@@ -214,19 +213,9 @@ def split(x, sizes_or_sections, dim):
assert len(x) == sizes_or_sections[0] assert len(x) == sizes_or_sections[0]
return [x] return [x]
if MX_VERSION.version[0] == 1 and MX_VERSION.version[1] >= 5: if isinstance(sizes_or_sections, (np.ndarray, list)):
if isinstance(sizes_or_sections, (np.ndarray, list)): sizes_or_sections1 = tuple(np.cumsum(sizes_or_sections)[:-1])
sizes_or_sections1 = tuple(np.cumsum(sizes_or_sections)[:-1]) return nd.split_v2(x, sizes_or_sections1, axis=dim)
return nd.split_v2(x, sizes_or_sections1, axis=dim)
if isinstance(sizes_or_sections, list) or isinstance(sizes_or_sections, np.ndarray):
# Old MXNet doesn't support split with different section sizes.
np_arr = x.asnumpy()
indices = np.cumsum(sizes_or_sections)[:-1]
res = np.split(np_arr, indices, axis=dim)
return [tensor(arr, dtype=x.dtype) for arr in res]
else:
return nd.split(x, sizes_or_sections, axis=dim)
def repeat(input, repeats, dim): def repeat(input, repeats, dim):
if isinstance(repeats, nd.NDArray): if isinstance(repeats, nd.NDArray):
......
import torch as th import torch as th
from distutils.version import LooseVersion from torch.cuda.amp import custom_fwd, custom_bwd
from ...base import is_all, ALL from ...base import is_all, ALL
from ...sparse import _gspmm, _gspmm_hetero, _gsddmm, _gsddmm_hetero, _segment_reduce, _bwd_segment_cmp from ...sparse import _gspmm, _gspmm_hetero, _gsddmm, _gsddmm_hetero, _segment_reduce, _bwd_segment_cmp
from ...sparse import _csrmm, _csrsum, _csrmask, _scatter_add, _update_grad_minmax_hetero from ...sparse import _csrmm, _csrsum, _csrmask, _scatter_add, _update_grad_minmax_hetero
...@@ -8,26 +8,6 @@ from ...sparse import _gspmm, _gspmm_hetero, _gsddmm, _gsddmm_hetero, _segment_r ...@@ -8,26 +8,6 @@ from ...sparse import _gspmm, _gspmm_hetero, _gsddmm, _gsddmm_hetero, _segment_r
from ...sparse import _csrmm, _csrsum, _csrmask, _scatter_add, _update_grad_minmax_hetero from ...sparse import _csrmm, _csrsum, _csrmask, _scatter_add, _update_grad_minmax_hetero
from ...heterograph_index import create_unitgraph_from_csr from ...heterograph_index import create_unitgraph_from_csr
if LooseVersion(th.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import custom_fwd, custom_bwd
else:
import functools
"""PyTorch natively supports automatic mixed precision in DGL 1.6, we redefine
the custom_fwd and custom_bwd function to be compatible with DGL 1.5.
"""
def custom_fwd(**kwargs):
def custom_fwd_inner(fwd):
@functools.wraps(fwd)
def decorate_fwd(*args, **kwargs):
return fwd(*args, **kwargs)
return decorate_fwd
return custom_fwd_inner
def custom_bwd(bwd):
@functools.wraps(bwd)
def decorate_bwd(*args, **kwargs):
return bwd(*args, **kwargs)
return decorate_bwd
__all__ = ['gspmm', 'gsddmm', 'gspmm_hetero', 'gsddmm_hetero', 'edge_softmax', 'edge_softmax_hetero', __all__ = ['gspmm', 'gsddmm', 'gspmm_hetero', 'gsddmm_hetero', 'edge_softmax', 'edge_softmax_hetero',
'segment_reduce', 'scatter_add', 'csrmm', 'csrsum', 'csrmask', 'gather_mm', 'segment_mm'] 'segment_reduce', 'scatter_add', 'csrmm', 'csrsum', 'csrmask', 'gather_mm', 'segment_mm']
......
...@@ -12,11 +12,9 @@ from torch.utils import dlpack ...@@ -12,11 +12,9 @@ from torch.utils import dlpack
from ... import ndarray as nd from ... import ndarray as nd
from ..._deprecate import kernel as K from ..._deprecate import kernel as K
from ...function.base import TargetCode from ...function.base import TargetCode
from ...base import dgl_warning
if LooseVersion(th.__version__) < LooseVersion("1.8.0"): if LooseVersion(th.__version__) < LooseVersion("1.9.0"):
raise Exception("Detected an old version of PyTorch. Please update torch>=1.8.0 " raise RuntimeError("DGL requires PyTorch >= 1.9.0")
"for the best experience.")
def data_type_dict(): def data_type_dict():
return {'float16' : th.float16, return {'float16' : th.float16,
......
...@@ -4,44 +4,25 @@ from __future__ import absolute_import ...@@ -4,44 +4,25 @@ from __future__ import absolute_import
from distutils.version import LooseVersion from distutils.version import LooseVersion
import tensorflow as tf import tensorflow as tf
from tensorflow.python.eager import context
import builtins import builtins
import numbers import numbers
import numpy as np import numpy as np
import os
from ... import ndarray as nd from ... import ndarray as nd
from ..._deprecate import kernel as K from ..._deprecate import kernel as K
from ...function.base import TargetCode from ...function.base import TargetCode
if not os.getenv("USE_TFDLPACK", False): if LooseVersion(tf.__version__) < LooseVersion("2.3.0"):
if LooseVersion(tf.__version__) < LooseVersion("2.2.0"): raise RuntimeError("DGL requires TensorFlow>=2.3.0 for the official DLPack support.")
raise RuntimeError("DGL requires tensorflow>=2.2.0 for the official DLPack support.")
def zerocopy_to_dlpack(data):
def zerocopy_to_dlpack(data): return tf.experimental.dlpack.to_dlpack(data)
return tf.experimental.dlpack.to_dlpack(data)
def zerocopy_from_dlpack(dlpack_tensor):
def zerocopy_from_dlpack(dlpack_tensor): # TODO(Jinjing): Tensorflow requires memory to be 64-bytes aligned. We check the
# TODO(Jinjing): Tensorflow requires memory to be 64-bytes aligned. We check the # alignment and make a copy if needed. The functionality is better in TF's main repo.
# alignment and make a copy if needed. The functionality is better in TF's main repo. aligned = nd.from_dlpack(dlpack_tensor).to_dlpack(64)
aligned = nd.from_dlpack(dlpack_tensor).to_dlpack(64) return tf.experimental.dlpack.from_dlpack(aligned)
return tf.experimental.dlpack.from_dlpack(aligned)
else:
# Use our own DLPack solution
try:
import tfdlpack
except ImportError:
raise ImportError('Cannot find tfdlpack, which is required by the Tensorflow backend. '
'Please follow https://github.com/VoVAllen/tf-dlpack for installation.')
if LooseVersion(tf.__version__) < LooseVersion("2.1.0"):
raise RuntimeError("DGL requires tensorflow>=2.1.0.")
def zerocopy_to_dlpack(input):
return tfdlpack.to_dlpack(input)
def zerocopy_from_dlpack(input):
return tfdlpack.from_dlpack(input)
def data_type_dict(): def data_type_dict():
......
...@@ -30,6 +30,7 @@ from ..distributed import DistGraph ...@@ -30,6 +30,7 @@ from ..distributed import DistGraph
from ..multiprocessing import call_once_and_share from ..multiprocessing import call_once_and_share
from ..cuda import stream as dgl_stream from ..cuda import stream as dgl_stream
PYTORCH_VER = LooseVersion(torch.__version__)
PYTHON_EXIT_STATUS = False PYTHON_EXIT_STATUS = False
def _set_python_exit_flag(): def _set_python_exit_flag():
global PYTHON_EXIT_STATUS global PYTHON_EXIT_STATUS
...@@ -972,19 +973,13 @@ class EdgeDataLoader(DataLoader): ...@@ -972,19 +973,13 @@ class EdgeDataLoader(DataLoader):
# GraphDataLoader loads a set of graphs so it's not relevant to the above. They are currently # GraphDataLoader loads a set of graphs so it's not relevant to the above. They are currently
# copied from the old DataLoader implementation. # copied from the old DataLoader implementation.
PYTORCH_VER = LooseVersion(torch.__version__)
PYTORCH_16 = PYTORCH_VER >= LooseVersion("1.6.0")
PYTORCH_17 = PYTORCH_VER >= LooseVersion("1.7.0")
def _create_dist_sampler(dataset, dataloader_kwargs, ddp_seed): def _create_dist_sampler(dataset, dataloader_kwargs, ddp_seed):
# Note: will change the content of dataloader_kwargs # Note: will change the content of dataloader_kwargs
dist_sampler_kwargs = {'shuffle': dataloader_kwargs.get('shuffle', False)} dist_sampler_kwargs = {'shuffle': dataloader_kwargs.get('shuffle', False)}
dataloader_kwargs['shuffle'] = False dataloader_kwargs['shuffle'] = False
if PYTORCH_16: dist_sampler_kwargs['seed'] = ddp_seed
dist_sampler_kwargs['seed'] = ddp_seed dist_sampler_kwargs['drop_last'] = dataloader_kwargs.get('drop_last', False)
if PYTORCH_17: dataloader_kwargs['drop_last'] = False
dist_sampler_kwargs['drop_last'] = dataloader_kwargs.get('drop_last', False)
dataloader_kwargs['drop_last'] = False
return DistributedSampler(dataset, **dist_sampler_kwargs) return DistributedSampler(dataset, **dist_sampler_kwargs)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment