OpenDAS / dgl · Commit ca5a13fe (unverified)

[hotfix] Replace `request_format` and related APIs with `create_format_` (#1924)

* upd * upd * upd * upd

Authored Aug 04, 2020 by Zihao Ye, committed via GitHub on Aug 04, 2020
Parent: 2c141229
Showing 5 changed files with 10 additions and 70 deletions.
examples/pytorch/gcmc/train_sampling.py (+2, -14)
examples/pytorch/graphsage/train_cv.py (+1, -13)
examples/pytorch/graphsage/train_cv_multi_gpu.py (+1, -13)
examples/pytorch/graphsage/train_sampling.py (+3, -15)
examples/pytorch/graphsage/train_sampling_multi_gpu.py (+3, -15)
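The change is the same across all five examples: each file's copy-pasted `prepare_mp` helper, which materialized the graph's sparse formats as a side effect of throwaway degree and edge queries, is deleted in favor of the dedicated in-place method `create_format_()`. A minimal sketch of old versus new, assuming the DGL build from this commit (later DGL releases rename the method to `create_formats_()`; the toy graph below is illustrative, not from the examples):

import dgl
import torch

# Toy homogeneous graph (illustrative only).
g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 3])))

# Old workaround, as in the removed prepare_mp helpers: force CSR/CSC/COO
# creation indirectly through dummy queries whose results are discarded.
g.in_degrees(0)
g.out_degrees(0)
g.find_edges([0])

# New API from this commit: build every sparse format in one explicit,
# in-place call, so forked sampler workers inherit them copy-on-write.
g.create_format_()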
examples/pytorch/gcmc/train_sampling.py

@@ -214,18 +214,6 @@ def thread_wrapped_func(func):
             raise exception.__class__(trace)
     return decorated_function
 
-def prepare_mp(g):
-    """
-    Explicitly materialize the CSR, CSC and COO representation of the given graph
-    so that they could be shared via copy-on-write to sampler workers and GPU
-    trainers.
-    This is a workaround before full shared memory support on heterogeneous graphs.
-    """
-    for etype in g.canonical_etypes:
-        g.in_degree(0, etype=etype)
-        g.out_degree(0, etype=etype)
-        g.find_edges([0], etype=etype)
-
 def config():
     parser = argparse.ArgumentParser(description='GCMC')
     parser.add_argument('--seed', default=123, type=int)

@@ -468,8 +456,8 @@ if __name__ == '__main__':
         run(0, n_gpus, args, devices, dataset)
     # multi gpu
     else:
-        prepare_mp(dataset.train_enc_graph)
-        prepare_mp(dataset.train_dec_graph)
+        dataset.train_enc_graph.create_format_()
+        dataset.train_dec_graph.create_format_()
         procs = []
         for proc_id in range(n_gpus):
             p = mp.Process(target=run, args=(proc_id, n_gpus, args, devices, dataset))
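The GCMC variant of the removed helper had to loop over `g.canonical_etypes`, because the dummy-query trick only touches one relation at a time; `create_format_()` covers every relation of a heterogeneous graph in a single call. A sketch on a hypothetical two-relation graph (the node and edge type names are illustrative, not from the example):

import dgl
import torch

# Hypothetical user-item graph in the spirit of GCMC's encoder graph.
hg = dgl.heterograph({
    ('user', 'rates', 'item'): (torch.tensor([0, 1]), torch.tensor([1, 0])),
    ('item', 'rated-by', 'user'): (torch.tensor([1, 0]), torch.tensor([0, 1])),
})

# One call materializes the formats for all canonical edge types,
# replacing the removed per-etype loop.
hg.create_format_()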
examples/pytorch/graphsage/train_cv.py

@@ -147,18 +147,6 @@ class NeighborSampler(object):
             hist_blocks.insert(0, hist_block)
         return blocks, hist_blocks
 
-def prepare_mp(g):
-    """
-    Explicitly materialize the CSR, CSC and COO representation of the given graph
-    so that they could be shared via copy-on-write to sampler workers and GPU
-    trainers.
-    This is a workaround before full shared memory support on heterogeneous graphs.
-    """
-    g.in_degree(0)
-    g.out_degree(0)
-    g.find_edges([0])
-
 def compute_acc(pred, labels):
     """
     Compute the accuracy of prediction given the labels.

@@ -332,7 +320,7 @@ if __name__ == '__main__':
     train_mask = g.ndata['train_mask']
     val_mask = g.ndata['val_mask']
     g.ndata['features'] = features
-    prepare_mp(g)
+    g.create_format_()
     # Pack data
     data = train_mask, val_mask, in_feats, labels, n_classes, g
examples/pytorch/graphsage/train_cv_multi_gpu.py

@@ -175,18 +175,6 @@ def thread_wrapped_func(func):
             raise exception.__class__(trace)
     return decorated_function
 
-def prepare_mp(g):
-    """
-    Explicitly materialize the CSR, CSC and COO representation of the given graph
-    so that they could be shared via copy-on-write to sampler workers and GPU
-    trainers.
-    This is a workaround before full shared memory support on heterogeneous graphs.
-    """
-    g.in_degree(0)
-    g.out_degree(0)
-    g.find_edges([0])
-
 def compute_acc(pred, labels):
     """
     Compute the accuracy of prediction given the labels.

@@ -397,7 +385,7 @@ if __name__ == '__main__':
     g.ndata['features'] = features.share_memory_()
     create_history_storage(g, args, n_classes)
-    prepare_mp(g)
+    g.create_format_()
     # Pack data
     data = train_mask, val_mask, in_feats, labels, n_classes, g
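This hunk shows the intended pre-fork recipe: put node features in shared memory with PyTorch's `share_memory_()` and materialize graph formats with `create_format_()` before any `mp.Process` is spawned, so workers read both without copying or rebuilding. A self-contained sketch of that pattern under the same DGL-at-this-commit assumption; `run`, the worker count, and the toy sizes are stand-ins for the example's trainer:

import dgl
import torch
import torch.multiprocessing as mp

def run(proc_id, g, features):
    # Stand-in for the per-GPU trainer: the worker can sample immediately
    # because every sparse format was already built in the parent process.
    print(proc_id, g.num_edges(), features.shape)

if __name__ == '__main__':
    g = dgl.rand_graph(1000, 5000)                      # toy graph
    features = torch.randn(g.num_nodes(), 16).share_memory_()
    g.create_format_()                                  # build formats pre-fork
    procs = []
    for proc_id in range(4):
        p = mp.Process(target=run, args=(proc_id, g, features))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()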
examples/pytorch/graphsage/train_sampling.py

@@ -89,18 +89,6 @@ class SAGE(nn.Module):
             x = y
         return y
 
-def prepare_mp(g):
-    """
-    Explicitly materialize the CSR, CSC and COO representation of the given graph
-    so that they could be shared via copy-on-write to sampler workers and GPU
-    trainers.
-    This is a workaround before full shared memory support on heterogeneous graphs.
-    """
-    g.in_degree(0)
-    g.out_degree(0)
-    g.find_edges([0])
-
 def compute_acc(pred, labels):
     """
     Compute the accuracy of prediction given the labels.

@@ -241,9 +229,9 @@ if __name__ == '__main__':
     else:
         train_g = val_g = test_g = g
-    prepare_mp(train_g)
-    prepare_mp(val_g)
-    prepare_mp(test_g)
+    train_g.create_format_()
+    val_g.create_format_()
+    test_g.create_format_()
     # Pack data
     data = in_feats, n_classes, train_g, val_g, test_g
examples/pytorch/graphsage/train_sampling_multi_gpu.py

@@ -90,18 +90,6 @@ class SAGE(nn.Module):
             x = y
         return y
 
-def prepare_mp(g):
-    """
-    Explicitly materialize the CSR, CSC and COO representation of the given graph
-    so that they could be shared via copy-on-write to sampler workers and GPU
-    trainers.
-    This is a workaround before full shared memory support on heterogeneous graphs.
-    """
-    g.in_degrees(0)
-    g.out_degrees(0)
-    g.find_edges([0])
-
 def compute_acc(pred, labels):
     """
     Compute the accuracy of prediction given the labels.

@@ -269,9 +257,9 @@ if __name__ == '__main__':
     else:
         train_g = val_g = test_g = g
-    prepare_mp(train_g)
-    prepare_mp(val_g)
-    prepare_mp(test_g)
+    train_g.create_format_()
+    val_g.create_format_()
+    test_g.create_format_()
     # Pack data
     data = in_feats, n_classes, train_g, val_g, test_g