Unverified commit 98325b10, authored by Hongzhi (Steve), Chen and committed by GitHub.

[Misc] Black auto fix. (#4691)

Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent c24e285a

import os
import time

import backend as F
import numpy as np
import scipy as sp
from numpy.testing import assert_array_equal

import dgl
from dgl import utils
from dgl.contrib import KVClient, KVServer

num_entries = 10
dim_size = 3

server_namebook = {0: [0, "127.0.0.1", 30070, 1]}
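# Namebook entries follow the DGL contrib KVStore convention:
# {server_id: [machine_id, ip, port, group_count]}.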
data_0 = F.zeros((num_entries, dim_size), F.float32, F.cpu())
g2l_0 = F.arange(0, num_entries)
partition_0 = F.zeros(num_entries, F.int64, F.cpu())

data_1 = F.zeros((num_entries * 2, dim_size), F.float32, F.cpu())
g2l_1 = F.arange(0, num_entries * 2)
partition_1 = F.zeros(num_entries * 2, F.int64, F.cpu())
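# Both partition books are all zeros: every global ID maps to partition 0,
# so all traffic below goes to the single server.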
data_3 = F.zeros((num_entries, dim_size), F.int64, F.cpu())
data_4 = F.zeros((num_entries, dim_size), F.float64, F.cpu())
data_5 = F.zeros((num_entries, dim_size), F.int32, F.cpu())
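# data_3/data_4/data_5 cover int64, float64 and int32 payloads for the
# push/pull round-trips exercised below.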
def start_server():
    my_server = KVServer(
        server_id=0, server_namebook=server_namebook, num_client=1
    )
    my_server.set_global2local(name="data_0", global2local=g2l_0)
    my_server.set_global2local(name="data_1", global2local=g2l_1)
    my_server.set_global2local(name="data_3", global2local=g2l_0)
    my_server.set_global2local(name="data_4", global2local=g2l_0)
    my_server.set_global2local(name="data_5", global2local=g2l_0)
    my_server.set_partition_book(name="data_0", partition_book=partition_0)
    my_server.set_partition_book(name="data_1", partition_book=partition_1)
    my_server.set_partition_book(name="data_3", partition_book=partition_0)
    my_server.set_partition_book(name="data_4", partition_book=partition_0)
    my_server.set_partition_book(name="data_5", partition_book=partition_0)
    my_server.init_data(name="data_0", data_tensor=data_0)
    my_server.init_data(name="data_1", data_tensor=data_1)
    my_server.init_data(name="data_3", data_tensor=data_3)
    my_server.init_data(name="data_4", data_tensor=data_4)
    my_server.init_data(name="data_5", data_tensor=data_5)
    my_server.start()

@@ -52,84 +55,117 @@ def start_client():
    my_client = KVClient(server_namebook=server_namebook)
    my_client.connect()

    my_client.init_data(
        name="data_2",
        shape=(num_entries, dim_size),
        dtype=F.float32,
        target_name="data_0",
    )
print("Init data from client..")
name_list = my_client.get_data_name_list()
assert len(name_list) == 6
assert 'data_0' in name_list
assert 'data_1' in name_list
assert 'data_2' in name_list
assert 'data_3' in name_list
assert 'data_4' in name_list
assert 'data_5' in name_list
meta_0 = my_client.get_data_meta('data_0')
assert "data_0" in name_list
assert "data_1" in name_list
assert "data_2" in name_list
assert "data_3" in name_list
assert "data_4" in name_list
assert "data_5" in name_list
meta_0 = my_client.get_data_meta("data_0")
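    # get_data_meta() returns (dtype, shape, partition_book).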
    assert meta_0[0] == F.float32
    assert meta_0[1] == tuple(F.shape(data_0))
    assert_array_equal(meta_0[2], partition_0)

    meta_1 = my_client.get_data_meta("data_1")
    assert meta_1[0] == F.float32
    assert meta_1[1] == tuple(F.shape(data_1))
    assert_array_equal(meta_1[2], partition_1)

    meta_2 = my_client.get_data_meta("data_2")
    assert meta_2[0] == F.float32
    assert meta_2[1] == tuple(F.shape(data_0))
    assert_array_equal(meta_2[2], partition_0)
    meta_3 = my_client.get_data_meta("data_3")
    assert meta_3[0] == F.int64
    assert meta_3[1] == tuple(F.shape(data_3))
    assert_array_equal(meta_3[2], partition_0)

    meta_4 = my_client.get_data_meta("data_4")
    assert meta_4[0] == F.float64
    assert meta_4[1] == tuple(F.shape(data_4))
    assert_array_equal(meta_4[2], partition_0)

    meta_5 = my_client.get_data_meta("data_5")
    assert meta_5[0] == F.int32
    assert meta_5[1] == tuple(F.shape(data_5))
    assert_array_equal(meta_5[2], partition_0)

    my_client.push(
        name="data_0",
        id_tensor=F.tensor([0, 1, 2]),
        data_tensor=F.tensor(
            [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]
        ),
    )
    my_client.push(
        name="data_2",
        id_tensor=F.tensor([0, 1, 2]),
        data_tensor=F.tensor(
            [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]
        ),
    )
    my_client.push(
        name="data_3",
        id_tensor=F.tensor([0, 1, 2]),
        data_tensor=F.tensor([[1, 1, 1], [2, 2, 2], [3, 3, 3]]),
    )
    my_client.push(
        name="data_4",
        id_tensor=F.tensor([0, 1, 2]),
        data_tensor=F.tensor(
            [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]], F.float64
        ),
    )
    my_client.push(
        name="data_5",
        id_tensor=F.tensor([0, 1, 2]),
        data_tensor=F.tensor([[1, 1, 1], [2, 2, 2], [3, 3, 3]], F.int32),
    )

    target = F.tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]])
    res = my_client.pull(name="data_0", id_tensor=F.tensor([0, 1, 2]))
    assert_array_equal(res, target)
    res = my_client.pull(name="data_2", id_tensor=F.tensor([0, 1, 2]))
    assert_array_equal(res, target)

    target = F.tensor([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
    res = my_client.pull(name="data_3", id_tensor=F.tensor([0, 1, 2]))
    assert_array_equal(res, target)

    target = F.tensor(
        [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]], F.float64
    )
    res = my_client.pull(name="data_4", id_tensor=F.tensor([0, 1, 2]))
    assert_array_equal(res, target)

    target = F.tensor([[1, 1, 1], [2, 2, 2], [3, 3, 3]], F.int32)
    res = my_client.pull(name="data_5", id_tensor=F.tensor([0, 1, 2]))
    assert_array_equal(res, target)

    my_client.shut_down()


if __name__ == "__main__":
    pid = os.fork()
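    # The forked child (pid == 0) hosts the KVServer; the parent becomes the client.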
    if pid == 0:
        start_server()
    else:
        time.sleep(2)  # give the server a moment to start
        start_client()

import backend as F
from test_utils import parametrize_idtype

import dgl


@parametrize_idtype
def test_heterograph_merge(idtype):
    g1 = (
        dgl.heterograph({("a", "to", "b"): ([0, 1], [1, 0])})
        .astype(idtype)
        .to(F.ctx())
    )
    g1_n_edges = g1.num_edges(etype="to")
    g1.nodes["a"].data["nh"] = F.randn((2, 3))
    g1.nodes["b"].data["nh"] = F.randn((2, 3))
    g1.edges["to"].data["eh"] = F.randn((2, 3))

    g2 = (
        dgl.heterograph({("a", "to", "b"): ([1, 2, 3], [2, 3, 5])})
        .astype(idtype)
        .to(F.ctx())
    )
    g2.nodes["a"].data["nh"] = F.randn((4, 3))
    g2.nodes["b"].data["nh"] = F.randn((6, 3))
    g2.edges["to"].data["eh"] = F.randn((3, 3))
    g2.add_nodes(3, ntype="a")
    g2.add_nodes(3, ntype="b")
@@ -36,14 +46,10 @@ def test_heterograph_merge(idtype):
        g2_n_nodes = g2.num_nodes(ntype=ntype)
        updated_g1_ndata = F.asnumpy(m.nodes[ntype].data[key][:g2_n_nodes])
        g2_ndata = F.asnumpy(g2.nodes[ntype].data[key])
        assert all((updated_g1_ndata == g2_ndata).flatten())

    # Check g1's edge data was updated with g2's in m.
    for key in m.edges["to"].data:
        updated_g1_edata = F.asnumpy(m.edges["to"].data[key][g1_n_edges:])
        g2_edata = F.asnumpy(g2.edges["to"].data[key])
        assert all((updated_g1_edata == g2_edata).flatten())

import unittest

import backend as F

from dgl.cuda import nccl
from dgl.partition import NDArrayPartition

def gen_test_id():
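    # 256 hex digits: the serialized width of an NCCL unique ID (128 bytes).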
    return "{:0256x}".format(78236728318467363)

@unittest.skipIf(
    F._default_context_str == "cpu", reason="NCCL only runs on GPU."
)
def test_nccl_id():
    nccl_id = nccl.UniqueId()
    text = str(nccl_id)
@@ -24,7 +29,9 @@ def test_nccl_id():
    assert nccl_id2 == nccl_id3

@unittest.skipIf(
    F._default_context_str == "cpu", reason="NCCL only runs on GPU."
)
def test_nccl_sparse_push_single_remainder():
    nccl_id = nccl.UniqueId()
    comm = nccl.Communicator(1, 0, nccl_id)
@@ -32,13 +39,16 @@ def test_nccl_sparse_push_single_remainder():
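    # Single-rank communicator: the all-to-all push must hand back its own input.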
    index = F.randint([10000], F.int32, F.ctx(), 0, 10000)
    value = F.uniform([10000, 100], F.float32, F.ctx(), -1.0, 1.0)

    part = NDArrayPartition(10000, 1, "remainder")

    ri, rv = comm.sparse_all_to_all_push(index, value, part)
    assert F.array_equal(ri, index)
    assert F.array_equal(rv, value)

@unittest.skipIf(
    F._default_context_str == "cpu", reason="NCCL only runs on GPU."
)
def test_nccl_sparse_pull_single_remainder():
    nccl_id = nccl.UniqueId()
    comm = nccl.Communicator(1, 0, nccl_id)
@@ -46,13 +56,16 @@ def test_nccl_sparse_pull_single_remainder():
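    # With one rank, a distributed pull reduces to a local gather_row.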
    req_index = F.randint([10000], F.int64, F.ctx(), 0, 100000)
    value = F.uniform([100000, 100], F.float32, F.ctx(), -1.0, 1.0)

    part = NDArrayPartition(100000, 1, "remainder")

    rv = comm.sparse_all_to_all_pull(req_index, value, part)
    exp_rv = F.gather_row(value, req_index)
    assert F.array_equal(rv, exp_rv)

@unittest.skipIf(
    F._default_context_str == "cpu", reason="NCCL only runs on GPU."
)
def test_nccl_sparse_push_single_range():
    nccl_id = nccl.UniqueId()
    comm = nccl.Communicator(1, 0, nccl_id)
@@ -60,14 +73,19 @@ def test_nccl_sparse_push_single_range():
    index = F.randint([10000], F.int32, F.ctx(), 0, 10000)
    value = F.uniform([10000, 100], F.float32, F.ctx(), -1.0, 1.0)

    part_ranges = F.copy_to(
        F.tensor([0, value.shape[0]], dtype=F.int64), F.ctx()
    )
    part = NDArrayPartition(10000, 1, "range", part_ranges=part_ranges)

    ri, rv = comm.sparse_all_to_all_push(index, value, part)
    assert F.array_equal(ri, index)
    assert F.array_equal(rv, value)

@unittest.skipIf(
    F._default_context_str == "cpu", reason="NCCL only runs on GPU."
)
def test_nccl_sparse_pull_single_range():
    nccl_id = nccl.UniqueId()
    comm = nccl.Communicator(1, 0, nccl_id)
@@ -75,20 +93,26 @@ def test_nccl_sparse_pull_single_range():
    req_index = F.randint([10000], F.int64, F.ctx(), 0, 100000)
    value = F.uniform([100000, 100], F.float32, F.ctx(), -1.0, 1.0)

    part_ranges = F.copy_to(
        F.tensor([0, value.shape[0]], dtype=F.int64), F.ctx()
    )
    part = NDArrayPartition(100000, 1, "range", part_ranges=part_ranges)

    rv = comm.sparse_all_to_all_pull(req_index, value, part)
    exp_rv = F.gather_row(value, req_index)
    assert F.array_equal(rv, exp_rv)

@unittest.skipIf(
    F._default_context_str == "cpu", reason="NCCL only runs on GPU."
)
def test_nccl_support():
    # this is just a smoke test, as we don't have any other way to know
    # if NCCL support is compiled in right now.
    nccl.is_supported()

if __name__ == "__main__":
    test_nccl_id()
    test_nccl_sparse_push_single_remainder()
    test_nccl_sparse_pull_single_remainder()
    test_nccl_sparse_push_single_range()
    test_nccl_sparse_pull_single_range()

import itertools
import unittest
from collections import Counter
from itertools import product

import backend as F
import networkx as nx
import numpy as np
import pytest
import scipy.sparse as ssp
import test_utils
from scipy.sparse import rand
from test_utils import get_cases, parametrize_idtype

import dgl
import dgl.function as fn
from dgl import DGLError

rfuncs = {"sum": fn.sum, "max": fn.max, "min": fn.min, "mean": fn.mean}
feat_size = 2
@unittest.skipIf(
    dgl.backend.backend_name != "pytorch", reason="Only support PyTorch for now"
)
def create_test_heterograph(idtype):
    # test heterograph from the docstring, plus a user -- wishes -- game relation
    # 3 users, 2 games, 2 developers
@@ -26,16 +32,21 @@ def create_test_heterograph(idtype):
    #         ('user', 'wishes', 'game'),
    #         ('developer', 'develops', 'game')])
    g = dgl.heterograph(
        {
            ("user", "follows", "user"): ([0, 1, 2, 1], [0, 0, 1, 1]),
            ("user", "plays", "game"): ([0, 1, 2, 1], [0, 0, 1, 1]),
            ("user", "wishes", "game"): ([0, 1, 1], [0, 0, 1]),
            ("developer", "develops", "game"): ([0, 1, 0], [0, 1, 1]),
        },
        idtype=idtype,
        device=F.ctx(),
    )
    assert g.idtype == idtype
    assert g.device == F.ctx()
    return g


def create_test_heterograph_2(idtype):
    src = np.random.randint(0, 50, 25)
@@ -44,31 +55,41 @@ def create_test_heterograph_2(idtype):
    dst1 = np.random.randint(0, 25, 10)
    src2 = np.random.randint(0, 100, 1000)
    dst2 = np.random.randint(0, 100, 1000)
    g = dgl.heterograph(
        {
            ("user", "becomes", "player"): (src, dst),
            ("user", "follows", "user"): (src, dst),
            ("user", "plays", "game"): (src, dst),
            ("user", "wishes", "game"): (src1, dst1),
            ("developer", "develops", "game"): (src2, dst2),
        },
        idtype=idtype,
        device=F.ctx(),
    )
    assert g.idtype == idtype
    assert g.device == F.ctx()
    return g


def create_test_heterograph_large(idtype):
    src = np.random.randint(0, 50, 2500)
    dst = np.random.randint(0, 50, 2500)
    g = dgl.heterograph(
        {
            ("user", "follows", "user"): (src, dst),
            ("user", "plays", "game"): (src, dst),
            ("user", "wishes", "game"): (src, dst),
            ("developer", "develops", "game"): (src, dst),
        },
        idtype=idtype,
        device=F.ctx(),
    )
    assert g.idtype == idtype
    assert g.device == F.ctx()
    return g


@parametrize_idtype
def test_unary_copy_u(idtype):
    def _test(mfunc, rfunc):
@@ -76,12 +97,12 @@ def test_unary_copy_u(idtype):
        g0 = create_test_heterograph(idtype)
        g1 = create_test_heterograph_large(idtype)
        cross_reducer = rfunc.__name__

        x1 = F.randn((g.num_nodes("user"), feat_size))
        x2 = F.randn((g.num_nodes("developer"), feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        g.nodes["user"].data["h"] = x1
        g.nodes["developer"].data["h"] = x2
        #################################################################
        # multi_update_all(): call msg_passing separately for each etype
@@ -89,21 +110,24 @@ def test_unary_copy_u(idtype):
        with F.record_grad():
            g.multi_update_all(
                {
                    etype: (mfunc("h", "m"), rfunc("m", "y"))
                    for etype in g.canonical_etypes
                },
                cross_reducer,
            )
            r1 = g.nodes["game"].data["y"].clone()
            r2 = g.nodes["user"].data["y"].clone()
            r3 = g.nodes["player"].data["y"].clone()
            loss = r1.sum() + r2.sum() + r3.sum()
            F.backward(loss)
            n_grad1 = F.grad(g.nodes["user"].data["h"]).clone()
            n_grad2 = F.grad(g.nodes["developer"].data["h"]).clone()

        g.nodes["user"].data.clear()
        g.nodes["developer"].data.clear()
        g.nodes["game"].data.clear()
        g.nodes["player"].data.clear()
        #################################################################
        # update_all(): call msg_passing for all etypes
@@ -111,29 +135,31 @@ def test_unary_copy_u(idtype):
        F.attach_grad(x1)
        F.attach_grad(x2)
        g.nodes["user"].data["h"] = x1
        g.nodes["developer"].data["h"] = x2
        with F.record_grad():
            g.update_all(mfunc("h", "m"), rfunc("m", "y"))
            r4 = g.nodes["game"].data["y"]
            r5 = g.nodes["user"].data["y"]
            r6 = g.nodes["player"].data["y"]
            loss = r4.sum() + r5.sum() + r6.sum()
            F.backward(loss)
            n_grad3 = F.grad(g.nodes["user"].data["h"])
            n_grad4 = F.grad(g.nodes["developer"].data["h"])

        assert F.allclose(r1, r4)
        assert F.allclose(r2, r5)
        assert F.allclose(r3, r6)
        assert F.allclose(n_grad1, n_grad3)
        assert F.allclose(n_grad2, n_grad4)

    _test(fn.copy_u, fn.sum)
    _test(fn.copy_u, fn.max)
    _test(fn.copy_u, fn.min)
    # _test(fn.copy_u, fn.mean)


@parametrize_idtype
def test_unary_copy_e(idtype):
    def _test(mfunc, rfunc):
@@ -142,18 +168,18 @@ def test_unary_copy_e(idtype):
        g0 = create_test_heterograph_2(idtype)
        g1 = create_test_heterograph(idtype)
        cross_reducer = rfunc.__name__

        x1 = F.randn((g.num_edges("plays"), feat_size))
        x2 = F.randn((g.num_edges("follows"), feat_size))
        x3 = F.randn((g.num_edges("develops"), feat_size))
        x4 = F.randn((g.num_edges("wishes"), feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)
        g["plays"].edata["eid"] = x1
        g["follows"].edata["eid"] = x2
        g["develops"].edata["eid"] = x3
        g["wishes"].edata["eid"] = x4
        #################################################################
        # multi_update_all(): call msg_passing separately for each etype
@@ -161,21 +187,23 @@ def test_unary_copy_e(idtype):
        with F.record_grad():
            g.multi_update_all(
                {
                    "plays": (mfunc("eid", "m"), rfunc("m", "y")),
                    "follows": (mfunc("eid", "m"), rfunc("m", "y")),
                    "develops": (mfunc("eid", "m"), rfunc("m", "y")),
                    "wishes": (mfunc("eid", "m"), rfunc("m", "y")),
                },
                cross_reducer,
            )
            r1 = g.nodes["game"].data["y"].clone()
            r2 = g.nodes["user"].data["y"].clone()
            loss = r1.sum() + r2.sum()
            F.backward(loss)
            e_grad1 = F.grad(g["develops"].edata["eid"]).clone()
            e_grad2 = F.grad(g["plays"].edata["eid"]).clone()
            e_grad3 = F.grad(g["wishes"].edata["eid"]).clone()
            e_grad4 = F.grad(g["follows"].edata["eid"]).clone()

        for _, etype, _ in g.canonical_etypes:
            g[etype].edata.clear()
        #################################################################
        # update_all(): call msg_passing for all etypes
@@ -187,67 +215,71 @@ def test_unary_copy_e(idtype):
        F.attach_grad(x3)
        F.attach_grad(x4)
        g["plays"].edata["eid"] = x1
        g["follows"].edata["eid"] = x2
        g["develops"].edata["eid"] = x3
        g["wishes"].edata["eid"] = x4
        with F.record_grad():
            g.update_all(mfunc("eid", "m"), rfunc("m", "y"))
            r3 = g.nodes["game"].data["y"]
            r4 = g.nodes["user"].data["y"]
            loss = r3.sum() + r4.sum()
            F.backward(loss)
            e_grad5 = F.grad(g["develops"].edata["eid"])
            e_grad6 = F.grad(g["plays"].edata["eid"])
            e_grad7 = F.grad(g["wishes"].edata["eid"])
            e_grad8 = F.grad(g["follows"].edata["eid"])
        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())
            ):
                if not np.allclose(x, y):
                    print("@{} {} v.s. {}".format(i, x, y))
        assert F.allclose(r1, r3)
        assert F.allclose(r2, r4)
        assert F.allclose(e_grad1, e_grad5)
        assert F.allclose(e_grad2, e_grad6)
        assert F.allclose(e_grad3, e_grad7)
        assert F.allclose(e_grad4, e_grad8)

    _test(fn.copy_e, fn.sum)
    _test(fn.copy_e, fn.max)
    _test(fn.copy_e, fn.min)
    # _test(fn.copy_e, fn.mean)


@parametrize_idtype
def test_binary_op(idtype):
    def _test(lhs, rhs, binary_op, reducer):
        g = create_test_heterograph(idtype)

        x1 = F.randn((g.num_nodes("user"), feat_size))
        x2 = F.randn((g.num_nodes("developer"), feat_size))
        x3 = F.randn((g.num_nodes("game"), feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        g.nodes["user"].data["h"] = x1
        g.nodes["developer"].data["h"] = x2
        g.nodes["game"].data["h"] = x3

        x1 = F.randn((4, feat_size))
        x2 = F.randn((4, feat_size))
        x3 = F.randn((3, feat_size))
        x4 = F.randn((3, feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)
        g["plays"].edata["h"] = x1
        g["follows"].edata["h"] = x2
        g["develops"].edata["h"] = x3
        g["wishes"].edata["h"] = x4
        builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
        builtin_msg = getattr(fn, builtin_msg_name)
@@ -259,10 +291,13 @@ def test_binary_op(idtype):
        with F.record_grad():
            g.multi_update_all(
                {
                    etype: (builtin_msg("h", "h", "m"), builtin_red("m", "y"))
                    for etype in g.canonical_etypes
                },
                "sum",
            )
            r1 = g.nodes["game"].data["y"]
            F.backward(r1, F.ones(r1.shape))
            n_grad1 = F.grad(r1)
@@ -270,15 +305,17 @@ def test_binary_op(idtype):
        # update_all(): call msg_passing for all etypes
        #################################################################
        g.update_all(builtin_msg("h", "h", "m"), builtin_red("m", "y"))
        r2 = g.nodes["game"].data["y"]
        F.backward(r2, F.ones(r2.shape))
        n_grad2 = F.grad(r2)

        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())
            ):
                if not np.allclose(x, y):
                    print("@{} {} v.s. {}".format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
@@ -300,8 +337,7 @@ def test_binary_op(idtype):
                _test(lhs, rhs, binary_op, reducer)

if __name__ == "__main__":
    test_unary_copy_u()
    test_unary_copy_e()
    test_binary_op()

import unittest

import backend as F
from test_utils import parametrize_idtype

from dgl.distributed import graph_partition_book as gpb
from dgl.partition import NDArrayPartition

@unittest.skipIf(F._default_context_str == 'cpu', reason="NDArrayPartition only works on GPU.")
@unittest.skipIf(
F._default_context_str == "cpu",
reason="NDArrayPartition only works on GPU.",
)
@parametrize_idtype
def test_get_node_partition_from_book(idtype):
    node_map = {"type_n": F.tensor([[0, 3], [4, 5], [6, 10]], dtype=idtype)}
    edge_map = {"type_e": F.tensor([[0, 9], [10, 15], [16, 25]], dtype=idtype)}
    book = gpb.RangePartitionBook(
        0, 3, node_map, edge_map, {"type_n": 0}, {"type_e": 0}
    )
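    # Inclusive node-ID ranges [0, 3], [4, 5], [6, 10]: three parts, 11 IDs total.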
    partition = gpb.get_node_partition_from_book(book, F.ctx())
    assert partition.num_parts() == 3
    assert partition.array_size() == 11
@@ -46,4 +41,3 @@ def test_get_node_partition_from_book(idtype):
    act_ids = partition.map_to_global(test_ids, 2)
    exp_ids = F.copy_to(F.tensor([6, 7, 10], dtype=idtype), F.ctx())
    assert F.array_equal(act_ids, exp_ids)

import backend as F
import pytest

import dgl


@pytest.mark.skipif(
    F._default_context_str == "cpu", reason="Need gpu for this test"
)
def test_pin_unpin():
    t = F.arange(0, 100, dtype=F.int64, ctx=F.cpu())
    assert not F.is_pinned(t)
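    # Only the PyTorch backend is expected to support in-place pinning here;
    # the other backends are expected to raise.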
    if F.backend_name == "pytorch":
        nd = dgl.utils.pin_memory_inplace(t)
        assert F.is_pinned(t)
        nd.unpin_memory_()
@@ -28,5 +32,6 @@ def test_pin_unpin():
            # tensorflow and mxnet should throw an error
            dgl.utils.pin_memory_inplace(t)

if __name__ == "__main__":
test_pin_unpin()
import unittest

import backend as F
import networkx as nx
import utils as U
from test_utils import parametrize_idtype

import dgl

def create_graph(idtype):
    g = dgl.from_networkx(nx.path_graph(5), idtype=idtype, device=F.ctx())
    return g

def mfunc(edges):
    return {"m": edges.src["x"]}

def rfunc(nodes):
    msg = F.sum(nodes.mailbox["m"], 1)
    return {"x": nodes.data["x"] + msg}
@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_prop_nodes_bfs(idtype):
    g = create_graph(idtype)
    g.ndata["x"] = F.ones((5, 2))
    dgl.prop_nodes_bfs(
        g, 0, message_func=mfunc, reduce_func=rfunc, apply_node_func=None
    )
    # pulling nodes in BFS order yields cumsum[i] + data[i] + data[i + 1]
    assert F.allclose(
        g.ndata["x"],
        F.tensor([[2.0, 2.0], [4.0, 4.0], [6.0, 6.0], [8.0, 8.0], [9.0, 9.0]]),
    )

@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_prop_edges_dfs(idtype):
    g = create_graph(idtype)
    g.ndata["x"] = F.ones((5, 2))
    dgl.prop_edges_dfs(
        g, 0, message_func=mfunc, reduce_func=rfunc, apply_node_func=None
    )
    # send-and-recv along the DFS order results in a cumulative sum
    assert F.allclose(
        g.ndata["x"],
        F.tensor([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]]),
    )

    g.ndata["x"] = F.ones((5, 2))
    dgl.prop_edges_dfs(
        g,
        0,
        has_reverse_edge=True,
        message_func=mfunc,
        reduce_func=rfunc,
        apply_node_func=None,
    )
    # result is cumsum[i] + cumsum[i - 1]
    assert F.allclose(
        g.ndata["x"],
        F.tensor([[1.0, 1.0], [3.0, 3.0], [5.0, 5.0], [7.0, 7.0], [9.0, 9.0]]),
    )

    g.ndata["x"] = F.ones((5, 2))
    dgl.prop_edges_dfs(
        g,
        0,
        has_nontree_edge=True,
        message_func=mfunc,
        reduce_func=rfunc,
        apply_node_func=None,
    )
    # result is cumsum[i] + cumsum[i + 1]
    assert F.allclose(
        g.ndata["x"],
        F.tensor([[3.0, 3.0], [5.0, 5.0], [7.0, 7.0], [9.0, 9.0], [5.0, 5.0]]),
    )

@unittest.skipIf(F._default_context_str == "gpu", reason="GPU not implemented")
@parametrize_idtype
def test_prop_nodes_topo(idtype):
    # bi-directional chain
@@ -64,14 +98,17 @@ def test_prop_nodes_topo(idtype):
    tree.add_edge(4, 2)
    tree = dgl.graph(tree.edges())
    # init node feature data
    tree.ndata["x"] = F.zeros((5, 2))
    # set all leaf nodes to be ones
    tree.nodes[[1, 3, 4]].data["x"] = F.ones((3, 2))
    dgl.prop_nodes_topo(
        tree, message_func=mfunc, reduce_func=rfunc, apply_node_func=None
    )
    # the root node gets the sum
    assert F.allclose(tree.nodes[0].data["x"], F.tensor([[3.0, 3.0]]))

if __name__ == "__main__":
    test_prop_nodes_bfs()
    test_prop_edges_dfs()
    test_prop_nodes_topo()

import unittest

import backend as F
import numpy as np

import dgl


@unittest.skipIf(
    F._default_context_str == "gpu", reason="GPU random choice not implemented"
)
def test_random_choice():
    # test 1
    a = F.arange(0, 100)
@@ -28,7 +33,7 @@ def test_random_choice():
    assert np.array_equal(np.sort(F.asnumpy(x)), F.asnumpy(a))
    # test 5, with prob
    prob = np.ones((100,))
    prob[37:40] = 0.0
    prob -= prob.min()
    prob /= prob.sum()
    prob = F.tensor(prob)
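    # indices 37-39 were given zero probability above, so the check below
    # asserts they are never drawn.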
@@ -37,5 +42,6 @@ def test_random_choice():
    for i in range(len(x)):
        assert F.asnumpy(x[i]) < 37 or F.asnumpy(x[i]) >= 40

if __name__ == "__main__":
    test_random_choice()