Unverified commit 32f12ee1, authored by Xin Yao, committed by GitHub

[Doc] Unify the minimal versions required for PyTorch/TensorFlow/MXNet (#4180)

parent 5bef48df
......
@@ -4,7 +4,6 @@ dependencies:
 - pip
 - pip:
   - tensorflow==2.3.0
-  - tfdlpack
   - pytest
   - nose
   - numpy
......
@@ -4,7 +4,6 @@ dependencies:
 - pip
 - pip:
   - tensorflow==2.3.0
-  - tfdlpack-gpu
   - pytest
   - nose
   - numpy
......
@@ -174,13 +174,13 @@ PyTorch backend
 ```````````````
 Export ``DGLBACKEND`` as ``pytorch`` to specify PyTorch backend. The required PyTorch
-version is 1.5.0 or later. See `pytorch.org <https://pytorch.org>`_ for installation instructions.
+version is 1.9.0 or later. See `pytorch.org <https://pytorch.org>`_ for installation instructions.

 MXNet backend
 `````````````
 Export ``DGLBACKEND`` as ``mxnet`` to specify MXNet backend. The required MXNet version is
-1.5 or later. See `mxnet.apache.org <https://mxnet.apache.org/get_started>`_ for installation
+1.6 or later. See `mxnet.apache.org <https://mxnet.apache.org/get_started>`_ for installation
 instructions.

 MXNet uses uint32 as the default data type for integer tensors, which only supports graph of
......
@@ -196,10 +196,6 @@ Tensorflow backend
 ``````````````````
 Export ``DGLBACKEND`` as ``tensorflow`` to specify Tensorflow backend. The required Tensorflow
-version is 2.2.0 or later. See `tensorflow.org <https://www.tensorflow.org/install>`_ for installation
+version is 2.3.0 or later. See `tensorflow.org <https://www.tensorflow.org/install>`_ for installation
 instructions. In addition, DGL will set ``TF_FORCE_GPU_ALLOW_GROWTH`` to ``true`` to prevent Tensorflow from taking over the whole GPU memory:

 .. code:: bash

-   pip install "tensorflow>=2.2.0"  # when using tensorflow cpu version
......
@@ -12,9 +12,8 @@ from ... import ndarray as dglnd
 from ..._deprecate import kernel as K
 from ...function.base import TargetCode

-MX_VERSION = LooseVersion(mx.__version__)
-if MX_VERSION.version[0] == 1 and MX_VERSION.version[1] < 5:
-    raise RuntimeError("DGL requires mxnet >= 1.5")
+if LooseVersion(mx.__version__) < LooseVersion("1.6.0"):
+    raise RuntimeError("DGL requires MXNet >= 1.6")

 # After MXNet 1.5, empty tensors aren't supported by default.
 # After we turn on the numpy compatible flag, MXNet supports empty NDArray.
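Note: the rewritten guard compares whole version strings instead of indexing into the parsed tuple, which behaves correctly outside the 1.x series. A quick standalone illustration (plain distutils, no MXNet needed; the version strings are arbitrary examples):

    from distutils.version import LooseVersion

    # The old-style check only fires when major == 1, so a hypothetical 0.12.1
    # would slip past it; the direct comparison flags every version below 1.6.0.
    for v in ["1.5.0", "0.12.1", "2.0.0"]:
        ver = LooseVersion(v)
        old_style = ver.version[0] == 1 and ver.version[1] < 6
        new_style = ver < LooseVersion("1.6.0")
        print(v, old_style, new_style)
    # 1.5.0  True  True
    # 0.12.1 False True   <- divergence: old guard misses a too-old version
    # 2.0.0  False False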
......
@@ -214,19 +213,9 @@ def split(x, sizes_or_sections, dim):
         assert len(x) == sizes_or_sections[0]
         return [x]
-    if MX_VERSION.version[0] == 1 and MX_VERSION.version[1] >= 5:
-        if isinstance(sizes_or_sections, (np.ndarray, list)):
-            sizes_or_sections1 = tuple(np.cumsum(sizes_or_sections)[:-1])
-            return nd.split_v2(x, sizes_or_sections1, axis=dim)
-    if isinstance(sizes_or_sections, list) or isinstance(sizes_or_sections, np.ndarray):
-        # Old MXNet doesn't support split with different section sizes.
-        np_arr = x.asnumpy()
-        indices = np.cumsum(sizes_or_sections)[:-1]
-        res = np.split(np_arr, indices, axis=dim)
-        return [tensor(arr, dtype=x.dtype) for arr in res]
-    else:
-        return nd.split(x, sizes_or_sections, axis=dim)
+    if isinstance(sizes_or_sections, (np.ndarray, list)):
+        sizes_or_sections1 = tuple(np.cumsum(sizes_or_sections)[:-1])
+        return nd.split_v2(x, sizes_or_sections1, axis=dim)
+    return nd.split(x, sizes_or_sections, axis=dim)

 def repeat(input, repeats, dim):
     if isinstance(repeats, nd.NDArray):
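Note: the surviving `split_v2` path turns section sizes into cut indices via a cumulative sum, dropping the final total. The same transformation on plain NumPy (no MXNet required; the sizes are arbitrary examples):

    import numpy as np

    sizes = [2, 3, 1]                       # section sizes, summing to len(x)
    indices = tuple(np.cumsum(sizes)[:-1])  # -> (2, 5): cut points, total dropped
    x = np.arange(6)
    print([part.tolist() for part in np.split(x, indices)])
    # [[0, 1], [2, 3, 4], [5]]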
......
 import torch as th
 from distutils.version import LooseVersion
+from torch.cuda.amp import custom_fwd, custom_bwd

 from ...base import is_all, ALL
 from ...sparse import _gspmm, _gspmm_hetero, _gsddmm, _gsddmm_hetero, _segment_reduce, _bwd_segment_cmp
 from ...sparse import _csrmm, _csrsum, _csrmask, _scatter_add, _update_grad_minmax_hetero
......
@@ -8,26 +8,6 @@ from ...sparse import _gspmm, _gspmm_hetero, _gsddmm, _gsddmm_hetero, _segment_r
 from ...sparse import _csrmm, _csrsum, _csrmask, _scatter_add, _update_grad_minmax_hetero
 from ...heterograph_index import create_unitgraph_from_csr

-if LooseVersion(th.__version__) >= LooseVersion("1.6.0"):
-    from torch.cuda.amp import custom_fwd, custom_bwd
-else:
-    import functools
-    """PyTorch natively supports automatic mixed precision since PyTorch 1.6; we redefine
-    the custom_fwd and custom_bwd functions to be compatible with PyTorch 1.5.
-    """
-    def custom_fwd(**kwargs):
-        def custom_fwd_inner(fwd):
-            @functools.wraps(fwd)
-            def decorate_fwd(*args, **kwargs):
-                return fwd(*args, **kwargs)
-            return decorate_fwd
-        return custom_fwd_inner
-
-    def custom_bwd(bwd):
-        @functools.wraps(bwd)
-        def decorate_bwd(*args, **kwargs):
-            return bwd(*args, **kwargs)
-        return decorate_bwd

 __all__ = ['gspmm', 'gsddmm', 'gspmm_hetero', 'gsddmm_hetero', 'edge_softmax', 'edge_softmax_hetero',
            'segment_reduce', 'scatter_add', 'csrmm', 'csrsum', 'csrmask', 'gather_mm', 'segment_mm']
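Note: with torch>=1.9 guaranteed, `torch.cuda.amp.custom_fwd`/`custom_bwd` can be imported unconditionally and the no-op shims go away. A minimal sketch of the decorator pattern these functions support (the `Scale` Function is hypothetical, not part of this file):

    import torch as th
    from torch.cuda.amp import custom_fwd, custom_bwd

    class Scale(th.autograd.Function):
        """Hypothetical example: a custom op that behaves well under autocast."""

        @staticmethod
        @custom_fwd(cast_inputs=th.float32)  # run the forward in fp32 even under autocast
        def forward(ctx, x):
            return x * 2.0

        @staticmethod
        @custom_bwd  # backward runs with autocast disabled, matching the forward's dtype
        def backward(ctx, grad):
            return grad * 2.0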
......
......
@@ -12,11 +12,9 @@ from torch.utils import dlpack
 from ... import ndarray as nd
 from ..._deprecate import kernel as K
 from ...function.base import TargetCode
-from ...base import dgl_warning

-if LooseVersion(th.__version__) < LooseVersion("1.8.0"):
-    raise Exception("Detected an old version of PyTorch. Please update torch>=1.8.0 "
-                    "for the best experience.")
+if LooseVersion(th.__version__) < LooseVersion("1.9.0"):
+    raise RuntimeError("DGL requires PyTorch >= 1.9.0")

 def data_type_dict():
     return {'float16' : th.float16,
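Note: `data_type_dict` is the backend's string-to-dtype registry, which is what lets framework-agnostic DGL code resolve dtypes by name. A sketch of the lookup (the mapping is abridged from the line above; the caller is illustrative):

    import torch as th

    def data_type_dict():
        # Abridged version of the registry shown above.
        return {'float16': th.float16, 'float32': th.float32, 'int64': th.int64}

    # Backend-neutral callers look dtypes up by name instead of importing torch:
    dtype = data_type_dict()['float32']
    print(th.zeros(3, dtype=dtype).dtype)  # torch.float32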
......
......
@@ -4,44 +4,25 @@ from __future__ import absolute_import
 from distutils.version import LooseVersion

 import tensorflow as tf
 from tensorflow.python.eager import context
 import builtins
 import numbers
 import numpy as np
-import os

 from ... import ndarray as nd
 from ..._deprecate import kernel as K
 from ...function.base import TargetCode

-if not os.getenv("USE_TFDLPACK", False):
-    if LooseVersion(tf.__version__) < LooseVersion("2.2.0"):
-        raise RuntimeError("DGL requires tensorflow>=2.2.0 for the official DLPack support.")
-
-    def zerocopy_to_dlpack(data):
-        return tf.experimental.dlpack.to_dlpack(data)
-
-    def zerocopy_from_dlpack(dlpack_tensor):
-        # TODO(Jinjing): Tensorflow requires memory to be 64-bytes aligned. We check the
-        # alignment and make a copy if needed. The functionality is better in TF's main repo.
-        aligned = nd.from_dlpack(dlpack_tensor).to_dlpack(64)
-        return tf.experimental.dlpack.from_dlpack(aligned)
-else:
-    # Use our own DLPack solution
-    try:
-        import tfdlpack
-    except ImportError:
-        raise ImportError('Cannot find tfdlpack, which is required by the Tensorflow backend. '
-                          'Please follow https://github.com/VoVAllen/tf-dlpack for installation.')
-    if LooseVersion(tf.__version__) < LooseVersion("2.1.0"):
-        raise RuntimeError("DGL requires tensorflow>=2.1.0.")
-
-    def zerocopy_to_dlpack(input):
-        return tfdlpack.to_dlpack(input)
-
-    def zerocopy_from_dlpack(input):
-        return tfdlpack.from_dlpack(input)
+if LooseVersion(tf.__version__) < LooseVersion("2.3.0"):
+    raise RuntimeError("DGL requires TensorFlow>=2.3.0 for the official DLPack support.")
+
+def zerocopy_to_dlpack(data):
+    return tf.experimental.dlpack.to_dlpack(data)
+
+def zerocopy_from_dlpack(dlpack_tensor):
+    # TODO(Jinjing): Tensorflow requires memory to be 64-bytes aligned. We check the
+    # alignment and make a copy if needed. The functionality is better in TF's main repo.
+    aligned = nd.from_dlpack(dlpack_tensor).to_dlpack(64)
+    return tf.experimental.dlpack.from_dlpack(aligned)

 def data_type_dict():
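Note: with the tfdlpack fallback removed, zero-copy exchange always goes through TensorFlow's official DLPack API (available since TF 2.2, required here as >=2.3). A standalone roundtrip, without DGL's 64-byte alignment fix-up:

    import tensorflow as tf

    t = tf.constant([1.0, 2.0, 3.0])
    capsule = tf.experimental.dlpack.to_dlpack(t)     # export as a DLPack capsule
    t2 = tf.experimental.dlpack.from_dlpack(capsule)  # import it back, sharing memory
    print(t2.numpy())  # [1. 2. 3.]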
......
......
@@ -30,6 +30,7 @@ from ..distributed import DistGraph
 from ..multiprocessing import call_once_and_share
 from ..cuda import stream as dgl_stream

+PYTORCH_VER = LooseVersion(torch.__version__)
 PYTHON_EXIT_STATUS = False

 def _set_python_exit_flag():
     global PYTHON_EXIT_STATUS
......
@@ -972,19 +973,13 @@ class EdgeDataLoader(DataLoader):
 # GraphDataLoader loads a set of graphs so it's not relevant to the above. They are currently
 # copied from the old DataLoader implementation.

-PYTORCH_VER = LooseVersion(torch.__version__)
-PYTORCH_16 = PYTORCH_VER >= LooseVersion("1.6.0")
-PYTORCH_17 = PYTORCH_VER >= LooseVersion("1.7.0")

 def _create_dist_sampler(dataset, dataloader_kwargs, ddp_seed):
     # Note: will change the content of dataloader_kwargs
     dist_sampler_kwargs = {'shuffle': dataloader_kwargs.get('shuffle', False)}
     dataloader_kwargs['shuffle'] = False
-    if PYTORCH_16:
-        dist_sampler_kwargs['seed'] = ddp_seed
-    if PYTORCH_17:
-        dist_sampler_kwargs['drop_last'] = dataloader_kwargs.get('drop_last', False)
-        dataloader_kwargs['drop_last'] = False
+    dist_sampler_kwargs['seed'] = ddp_seed
+    dist_sampler_kwargs['drop_last'] = dataloader_kwargs.get('drop_last', False)
+    dataloader_kwargs['drop_last'] = False
     return DistributedSampler(dataset, **dist_sampler_kwargs)
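Note: `seed` and `drop_last` were added to `torch.utils.data.distributed.DistributedSampler` in PyTorch 1.6 and 1.7 respectively, which is why the version gates could be dropped once torch>=1.9 is the floor. A small sketch mirroring what `_create_dist_sampler` builds (toy dataset; explicit `num_replicas`/`rank` so no process group is needed):

    from torch.utils.data.distributed import DistributedSampler

    dataset = list(range(10))
    # Shuffling moves into the sampler; the DataLoader's own shuffle/drop_last are
    # forced off above so the sampler is the single source of truth.
    sampler = DistributedSampler(dataset, num_replicas=2, rank=0,
                                 shuffle=True, seed=42, drop_last=False)
    print(list(iter(sampler)))  # this rank's shard of indices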
......