Unverified Commit f4eef726 authored by Xin Yao's avatar Xin Yao Committed by GitHub
Browse files

[Fix core lib warning] Remove LooseVersion (#5026)

* replace LooseVersion with packaging.version

* import packaging from setuptools

* import packaging.version in dgl.utils

* format

* revert changes to apps/kg

* revert changes to _dataloading
parent 6bd81758
...@@ -3,7 +3,6 @@ from __future__ import absolute_import ...@@ -3,7 +3,6 @@ from __future__ import absolute_import
import builtins import builtins
import numbers import numbers
import os import os
from distutils.version import LooseVersion
import mxnet as mx import mxnet as mx
import mxnet.ndarray as nd import mxnet.ndarray as nd
...@@ -12,8 +11,9 @@ import numpy as np ...@@ -12,8 +11,9 @@ import numpy as np
from ... import ndarray as dglnd from ... import ndarray as dglnd
from ..._deprecate import kernel as K from ..._deprecate import kernel as K
from ...function.base import TargetCode from ...function.base import TargetCode
from ...utils import version
if LooseVersion(mx.__version__) < LooseVersion("1.6.0"): if version.parse(mx.__version__) < version.parse("1.6.0"):
raise RuntimeError("DGL requires MXNet >= 1.6") raise RuntimeError("DGL requires MXNet >= 1.6")
# After MXNet 1.5, empty tensors aren't supported by default. # After MXNet 1.5, empty tensors aren't supported by default.
......
...@@ -2,7 +2,6 @@ from __future__ import absolute_import ...@@ -2,7 +2,6 @@ from __future__ import absolute_import
import builtins import builtins
import numbers import numbers
from distutils.version import LooseVersion
import numpy as np import numpy as np
import scipy # Weird bug in new pytorch when import scipy after import torch import scipy # Weird bug in new pytorch when import scipy after import torch
...@@ -12,8 +11,9 @@ from torch.utils import dlpack ...@@ -12,8 +11,9 @@ from torch.utils import dlpack
from ... import ndarray as nd from ... import ndarray as nd
from ..._deprecate import kernel as K from ..._deprecate import kernel as K
from ...function.base import TargetCode from ...function.base import TargetCode
from ...utils import version
if LooseVersion(th.__version__) < LooseVersion("1.9.0"): if version.parse(th.__version__) < version.parse("1.9.0"):
raise RuntimeError("DGL requires PyTorch >= 1.9.0") raise RuntimeError("DGL requires PyTorch >= 1.9.0")
...@@ -425,7 +425,7 @@ def zerocopy_from_numpy(np_array): ...@@ -425,7 +425,7 @@ def zerocopy_from_numpy(np_array):
return th.as_tensor(np_array) return th.as_tensor(np_array)
if LooseVersion(th.__version__) >= LooseVersion("1.10.0"): if version.parse(th.__version__) >= version.parse("1.10.0"):
def zerocopy_to_dgl_ndarray(data): def zerocopy_to_dgl_ndarray(data):
if data.dtype == th.bool: if data.dtype == th.bool:
......
...@@ -3,7 +3,6 @@ from __future__ import absolute_import ...@@ -3,7 +3,6 @@ from __future__ import absolute_import
import builtins import builtins
import numbers import numbers
from distutils.version import LooseVersion
import numpy as np import numpy as np
import tensorflow as tf import tensorflow as tf
...@@ -11,8 +10,9 @@ import tensorflow as tf ...@@ -11,8 +10,9 @@ import tensorflow as tf
from ... import ndarray as nd from ... import ndarray as nd
from ..._deprecate import kernel as K from ..._deprecate import kernel as K
from ...function.base import TargetCode from ...function.base import TargetCode
from ...utils import version
if LooseVersion(tf.__version__) < LooseVersion("2.3.0"): if version.parse(tf.__version__) < version.parse("2.3.0"):
raise RuntimeError( raise RuntimeError(
"DGL requires TensorFlow>=2.3.0 for the official DLPack support." "DGL requires TensorFlow>=2.3.0 for the official DLPack support."
) )
......
...@@ -3,7 +3,6 @@ from collections.abc import Mapping, Sequence ...@@ -3,7 +3,6 @@ from collections.abc import Mapping, Sequence
from queue import Queue, Empty, Full from queue import Queue, Empty, Full
import itertools import itertools
import threading import threading
from distutils.version import LooseVersion
import math import math
import inspect import inspect
import re import re
...@@ -23,7 +22,7 @@ from .._ffi.base import is_tensor_adaptor_enabled ...@@ -23,7 +22,7 @@ from .._ffi.base import is_tensor_adaptor_enabled
from ..heterograph import DGLGraph from ..heterograph import DGLGraph
from ..utils import ( from ..utils import (
recursive_apply, ExceptionWrapper, recursive_apply_pair, set_num_threads, get_num_threads, recursive_apply, ExceptionWrapper, recursive_apply_pair, set_num_threads, get_num_threads,
get_numa_nodes_cores, context_of, dtype_of) get_numa_nodes_cores, context_of, dtype_of, version)
from ..frame import LazyFeature from ..frame import LazyFeature
from ..storages import wrap_storage from ..storages import wrap_storage
from .base import BlockSampler, as_edge_prediction_sampler from .base import BlockSampler, as_edge_prediction_sampler
...@@ -31,7 +30,7 @@ from .. import backend as F ...@@ -31,7 +30,7 @@ from .. import backend as F
from ..distributed import DistGraph from ..distributed import DistGraph
from ..multiprocessing import call_once_and_share from ..multiprocessing import call_once_and_share
PYTORCH_VER = LooseVersion(torch.__version__) PYTORCH_VER = version.parse(torch.__version__)
PYTHON_EXIT_STATUS = False PYTHON_EXIT_STATUS = False
def _set_python_exit_flag(): def _set_python_exit_flag():
global PYTHON_EXIT_STATUS global PYTHON_EXIT_STATUS
...@@ -76,7 +75,7 @@ class _TensorizedDatasetIter(object): ...@@ -76,7 +75,7 @@ class _TensorizedDatasetIter(object):
# convert the type-ID pairs to dictionary # convert the type-ID pairs to dictionary
type_ids = batch[:, 0] type_ids = batch[:, 0]
indices = batch[:, 1] indices = batch[:, 1]
if PYTORCH_VER >= LooseVersion("1.10.0"): if PYTORCH_VER >= version.parse("1.10.0"):
_, type_ids_sortidx = torch.sort(type_ids, stable=True) _, type_ids_sortidx = torch.sort(type_ids, stable=True)
else: else:
if not self.shuffle: if not self.shuffle:
......
...@@ -6,3 +6,9 @@ from .filter import * ...@@ -6,3 +6,9 @@ from .filter import *
from .internal import * from .internal import *
from .pin_memory import * from .pin_memory import *
from .shared_mem import * from .shared_mem import *
# Expose a ``version`` module providing ``version.parse`` for runtime version
# comparisons, replacing the deprecated ``distutils.version.LooseVersion``
# (see the commit message above: "Remove LooseVersion").
try:
    from packaging import version
except ImportError:
    # If packaging isn't installed, try and use the vendored copy in setuptools
    # NOTE(review): ``setuptools.extern`` is an internal vendoring namespace and
    # is not a stable public API — confirm the minimum supported setuptools
    # version still ships it before relying on this fallback.
    from setuptools.extern.packaging import version
from distutils.version import LooseVersion
import random import random
import unittest import unittest
...@@ -11,6 +10,7 @@ from test_utils.graph_cases import get_cases ...@@ -11,6 +10,7 @@ from test_utils.graph_cases import get_cases
import dgl import dgl
from dgl.ops import edge_softmax, gsddmm, gspmm, segment_reduce from dgl.ops import edge_softmax, gsddmm, gspmm, segment_reduce
from dgl.utils import version
random.seed(42) random.seed(42)
np.random.seed(42) np.random.seed(42)
...@@ -189,7 +189,7 @@ def test_spmm(idtype, g, shp, msg, reducer): ...@@ -189,7 +189,7 @@ def test_spmm(idtype, g, shp, msg, reducer):
[(torch.float16, 1e-3, 0.5), (torch.bfloat16, 4e-3, 2.)] [(torch.float16, 1e-3, 0.5), (torch.bfloat16, 4e-3, 2.)]
) )
def test_half_spmm(idtype, dtype, rtol, atol): def test_half_spmm(idtype, dtype, rtol, atol):
if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ if version.parse(torch.version.cuda) < version.parse("11.0") \
and dtype == torch.bfloat16: and dtype == torch.bfloat16:
pytest.skip("BF16 requires CUDA >= 11.0.") pytest.skip("BF16 requires CUDA >= 11.0.")
...@@ -373,7 +373,7 @@ def test_segment_mm(idtype, feat_size, dtype, tol): ...@@ -373,7 +373,7 @@ def test_segment_mm(idtype, feat_size, dtype, tol):
"Only support float32 and float64 on CPU." "Only support float32 and float64 on CPU."
) )
if F._default_context_str == "gpu" \ if F._default_context_str == "gpu" \
and LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and version.parse(torch.version.cuda) < version.parse("11.0") \
and dtype == torch.bfloat16: and dtype == torch.bfloat16:
pytest.skip( pytest.skip(
"BF16 requires CUDA >= 11.0." "BF16 requires CUDA >= 11.0."
...@@ -426,7 +426,7 @@ def test_gather_mm_idx_b(feat_size, dtype, tol): ...@@ -426,7 +426,7 @@ def test_gather_mm_idx_b(feat_size, dtype, tol):
if F._default_context_str == "cpu" and dtype in (torch.float16, torch.bfloat16): if F._default_context_str == "cpu" and dtype in (torch.float16, torch.bfloat16):
pytest.skip("Only support float32 and float64 on CPU.") pytest.skip("Only support float32 and float64 on CPU.")
if F._default_context_str == "gpu" \ if F._default_context_str == "gpu" \
and LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and version.parse(torch.version.cuda) < version.parse("11.0") \
and dtype == torch.bfloat16: and dtype == torch.bfloat16:
pytest.skip("BF16 requires CUDA >= 11.0.") pytest.skip("BF16 requires CUDA >= 11.0.")
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment