Unverified commit 89a4cc4d authored by Hongzhi (Steve) Chen, committed by GitHub
Browse files

[Misc] Black auto fix. (#4694)


Co-authored-by: default avatarSteve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 303b150f
import backend as F
import mxnet as mx import mxnet as mx
import numpy as np
from dgl.geometry import farthest_point_sampler from dgl.geometry import farthest_point_sampler
import backend as F
import numpy as np
def test_fps(): def test_fps():
N = 1000 N = 1000
batch_size = 5 batch_size = 5
sample_points = 10 sample_points = 10
x = mx.nd.array(np.random.uniform(size=(batch_size, int(N/batch_size), 3))) x = mx.nd.array(
np.random.uniform(size=(batch_size, int(N / batch_size), 3))
)
ctx = F.ctx() ctx = F.ctx()
if F.gpu_ctx(): if F.gpu_ctx():
x = x.as_in_context(ctx) x = x.as_in_context(ctx)
...@@ -17,5 +20,6 @@ def test_fps(): ...@@ -17,5 +20,6 @@ def test_fps():
assert res.shape[1] == sample_points assert res.shape[1] == sample_points
assert res.sum() > 0 assert res.sum() > 0
if __name__ == '__main__':
if __name__ == "__main__":
test_fps() test_fps()
This diff is collapsed.
import operator
import numpy as np import numpy as np
import pytest import pytest
import dgl
import torch import torch
import operator
import dgl
from dgl.mock_sparse import SparseMatrix, diag from dgl.mock_sparse import SparseMatrix, diag
parametrize_idtype = pytest.mark.parametrize( parametrize_idtype = pytest.mark.parametrize(
......
This diff is collapsed.
import backend as F import backend as F
import dgl.nn
import dgl
import numpy as np import numpy as np
import pytest import pytest
import torch as th import torch as th
from dgl import DGLError
from dgl.base import DGLWarning
from dgl.geometry import neighbor_matching, farthest_point_sampler
from test_utils import parametrize_idtype from test_utils import parametrize_idtype
from test_utils.graph_cases import get_cases from test_utils.graph_cases import get_cases
import dgl
import dgl.nn
from dgl import DGLError
from dgl.base import DGLWarning
from dgl.geometry import farthest_point_sampler, neighbor_matching
def test_fps(): def test_fps():
N = 1000 N = 1000
batch_size = 5 batch_size = 5
sample_points = 10 sample_points = 10
x = th.tensor(np.random.uniform(size=(batch_size, int(N/batch_size), 3))) x = th.tensor(np.random.uniform(size=(batch_size, int(N / batch_size), 3)))
ctx = F.ctx() ctx = F.ctx()
if F.gpu_ctx(): if F.gpu_ctx():
x = x.to(ctx) x = x.to(ctx)
...@@ -29,17 +30,18 @@ def test_fps_start_idx(): ...@@ -29,17 +30,18 @@ def test_fps_start_idx():
N = 1000 N = 1000
batch_size = 5 batch_size = 5
sample_points = 10 sample_points = 10
x = th.tensor(np.random.uniform(size=(batch_size, int(N/batch_size), 3))) x = th.tensor(np.random.uniform(size=(batch_size, int(N / batch_size), 3)))
ctx = F.ctx() ctx = F.ctx()
if F.gpu_ctx(): if F.gpu_ctx():
x = x.to(ctx) x = x.to(ctx)
res = farthest_point_sampler(x, sample_points, start_idx=0) res = farthest_point_sampler(x, sample_points, start_idx=0)
assert th.any(res[:, 0] == 0) assert th.any(res[:, 0] == 0)
def _test_knn_common(device, algorithm, dist, exclude_self): def _test_knn_common(device, algorithm, dist, exclude_self):
x = th.randn(8, 3).to(device) x = th.randn(8, 3).to(device)
kg = dgl.nn.KNNGraph(3) kg = dgl.nn.KNNGraph(3)
if dist == 'euclidean': if dist == "euclidean":
d = th.cdist(x, x).to(F.cpu()) d = th.cdist(x, x).to(F.cpu())
else: else:
x = x + th.randn(1).item() x = x + th.randn(1).item()
...@@ -55,7 +57,14 @@ def _test_knn_common(device, algorithm, dist, exclude_self): ...@@ -55,7 +57,14 @@ def _test_knn_common(device, algorithm, dist, exclude_self):
assert len(src) == k assert len(src) == k
if check_indices: if check_indices:
i = v - start i = v - start
src_ans = set(th.topk(d[start:end, start:end][i], k + (1 if exclude_self else 0), largest=False)[1].numpy() + start) src_ans = set(
th.topk(
d[start:end, start:end][i],
k + (1 if exclude_self else 0),
largest=False,
)[1].numpy()
+ start
)
if exclude_self: if exclude_self:
# remove self # remove self
src_ans.remove(v) src_ans.remove(v)
...@@ -63,7 +72,9 @@ def _test_knn_common(device, algorithm, dist, exclude_self): ...@@ -63,7 +72,9 @@ def _test_knn_common(device, algorithm, dist, exclude_self):
def check_batch(g, k, expected_batch_info): def check_batch(g, k, expected_batch_info):
assert F.array_equal(g.batch_num_nodes(), F.tensor(expected_batch_info)) assert F.array_equal(g.batch_num_nodes(), F.tensor(expected_batch_info))
assert F.array_equal(g.batch_num_edges(), k*F.tensor(expected_batch_info)) assert F.array_equal(
g.batch_num_edges(), k * F.tensor(expected_batch_info)
)
# check knn with 2d input # check knn with 2d input
g = kg(x, algorithm, dist, exclude_self) g = kg(x, algorithm, dist, exclude_self)
...@@ -145,23 +156,27 @@ def _test_knn_common(device, algorithm, dist, exclude_self): ...@@ -145,23 +156,27 @@ def _test_knn_common(device, algorithm, dist, exclude_self):
kg = dgl.nn.SegmentedKNNGraph(3) kg = dgl.nn.SegmentedKNNGraph(3)
g = kg(x, [4, 7, 5, 4], algorithm, dist, exclude_self) g = kg(x, [4, 7, 5, 4], algorithm, dist, exclude_self)
# different algorithms may break the tie differently, so don't check the indices # different algorithms may break the tie differently, so don't check the indices
check_knn(g, x, 0, 4, 3, exclude_self, False) check_knn(g, x, 0, 4, 3, exclude_self, False)
check_knn(g, x, 4, 11, 3, exclude_self, False) check_knn(g, x, 4, 11, 3, exclude_self, False)
check_knn(g, x, 11, 16, 3, exclude_self, False) check_knn(g, x, 11, 16, 3, exclude_self, False)
check_knn(g, x, 16, 20, 3, exclude_self, False) check_knn(g, x, 16, 20, 3, exclude_self, False)
check_batch(g, 3, [4, 7, 5, 4]) check_batch(g, 3, [4, 7, 5, 4])
@pytest.mark.parametrize(
    "algorithm", ["bruteforce-blas", "bruteforce", "kd-tree"]
)
@pytest.mark.parametrize("dist", ["euclidean", "cosine"])
@pytest.mark.parametrize("exclude_self", [False, True])
def test_knn_cpu(algorithm, dist, exclude_self):
    """Run the shared KNN-graph checks on CPU.

    Parametrized over every CPU-capable algorithm, both distance
    metrics, and self-loop exclusion; all real work is delegated to
    ``_test_knn_common``.
    """
    _test_knn_common(F.cpu(), algorithm, dist, exclude_self)
@pytest.mark.parametrize('algorithm', ['bruteforce-blas', 'bruteforce', 'bruteforce-sharemem']) @pytest.mark.parametrize(
@pytest.mark.parametrize('dist', ['euclidean', 'cosine']) "algorithm", ["bruteforce-blas", "bruteforce", "bruteforce-sharemem"]
@pytest.mark.parametrize('exclude_self', [False, True]) )
@pytest.mark.parametrize("dist", ["euclidean", "cosine"])
@pytest.mark.parametrize("exclude_self", [False, True])
def test_knn_cuda(algorithm, dist, exclude_self): def test_knn_cuda(algorithm, dist, exclude_self):
if not th.cuda.is_available(): if not th.cuda.is_available():
return return
...@@ -169,9 +184,9 @@ def test_knn_cuda(algorithm, dist, exclude_self): ...@@ -169,9 +184,9 @@ def test_knn_cuda(algorithm, dist, exclude_self):
@parametrize_idtype @parametrize_idtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize("g", get_cases(["homo"], exclude=["dglgraph"]))
@pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize("weight", [True, False])
@pytest.mark.parametrize('relabel', [True, False]) @pytest.mark.parametrize("relabel", [True, False])
def test_edge_coarsening(idtype, g, weight, relabel): def test_edge_coarsening(idtype, g, weight, relabel):
num_nodes = g.num_nodes() num_nodes = g.num_nodes()
g = dgl.to_bidirected(g) g = dgl.to_bidirected(g)
...@@ -205,7 +220,7 @@ def test_edge_coarsening(idtype, g, weight, relabel): ...@@ -205,7 +220,7 @@ def test_edge_coarsening(idtype, g, weight, relabel):
assert g.has_edges_between(u, v) assert g.has_edges_between(u, v)
# Manual entry point: run the CPU geometry tests directly without pytest.
if __name__ == "__main__":
    test_fps()
    test_fps_start_idx()
    test_knn()
import dgl
import torch as th
import torch.multiprocessing as mp
import os import os
import unittest import unittest
import torch as th
import torch.multiprocessing as mp
import dgl
def sub_ipc(g):
    """Child-process target: print the received object and return it.

    Used by ``test_torch_ipc`` to verify that a graph survives being
    sent to a spawned subprocess (pickle / torch IPC round-trip).
    """
    print(g)
    return g
@unittest.skipIf(os.name == "nt", reason="Do not support windows yet")
def test_torch_ipc():
    """Smoke-test sending a DGLGraph to a spawned child process.

    Builds a tiny 4-node graph and hands it to ``sub_ipc`` through a
    ``spawn``-context process; the test passes if the child runs to
    completion without raising.
    """
    g = dgl.graph(([0, 1, 2], [1, 2, 3]))
    ctx = mp.get_context("spawn")
    p = ctx.Process(target=sub_ipc, args=(g,))
    p.start()
    p.join()
# Manual entry point: run the IPC smoke test directly without pytest.
if __name__ == "__main__":
    test_torch_ipc()
This diff is collapsed.
import io
import pickle
import networkx as nx import networkx as nx
import dgl
import torch import torch
import pickle
import io import dgl
def _reconstruct_pickle(obj): def _reconstruct_pickle(obj):
f = io.BytesIO() f = io.BytesIO()
...@@ -12,15 +15,17 @@ def _reconstruct_pickle(obj): ...@@ -12,15 +15,17 @@ def _reconstruct_pickle(obj):
f.close() f.close()
return obj return obj
def test_pickling_batched_graph():
    """Regression test: pickle round-trip of a batched graph with features.

    NOTE: this is a test for a weird bug mentioned in
    https://github.com/dmlc/dgl/issues/438
    """
    # Five path graphs of 5..9 nodes -> 35 nodes / 60 directed edges total,
    # matching the feature-tensor sizes below.
    glist = [nx.path_graph(i + 5) for i in range(5)]
    glist = [dgl.DGLGraph(g) for g in glist]
    bg = dgl.batch(glist)
    bg.ndata["x"] = torch.randn((35, 5))
    bg.edata["y"] = torch.randn((60, 3))
    # Passing means the pickle/unpickle cycle itself does not raise.
    new_bg = _reconstruct_pickle(bg)
# Manual entry point: run the pickling test directly without pytest.
if __name__ == "__main__":
    test_pickling_batched_graph()
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment