Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dgl
Commits
1e3fcc7c
Unverified
Commit
1e3fcc7c
authored
Aug 31, 2020
by
xiang song(charlie.song)
Committed by
GitHub
Aug 31, 2020
Browse files
update comments (#2132)
Co-authored-by:
Ubuntu
<
ubuntu@ip-172-31-51-214.ec2.internal
>
parent
7993a4d8
Changes
9
Hide whitespace changes
Inline
Side-by-side
Showing
9 changed files
with
19 additions
and
5 deletions
+19
-5
examples/pytorch/gcmc/train_sampling.py
examples/pytorch/gcmc/train_sampling.py
+2
-0
examples/pytorch/graphsage/train_cv_multi_gpu.py
examples/pytorch/graphsage/train_cv_multi_gpu.py
+2
-0
examples/pytorch/graphsage/train_sampling.py
examples/pytorch/graphsage/train_sampling.py
+2
-0
examples/pytorch/graphsage/train_sampling_multi_gpu.py
examples/pytorch/graphsage/train_sampling_multi_gpu.py
+2
-0
examples/pytorch/graphsage/train_sampling_unsupervised.py
examples/pytorch/graphsage/train_sampling_unsupervised.py
+3
-0
examples/pytorch/ogb/cluster-sage/sampler.py
examples/pytorch/ogb/cluster-sage/sampler.py
+0
-5
examples/pytorch/ogb/ogbn-products/gat/main.py
examples/pytorch/ogb/ogbn-products/gat/main.py
+3
-0
examples/pytorch/ogb/ogbn-products/graphsage/main.py
examples/pytorch/ogb/ogbn-products/graphsage/main.py
+2
-0
examples/pytorch/rgcn/entity_classify_mp.py
examples/pytorch/rgcn/entity_classify_mp.py
+3
-0
No files found.
examples/pytorch/gcmc/train_sampling.py
View file @
1e3fcc7c
...
@@ -403,6 +403,8 @@ if __name__ == '__main__':
...
@@ -403,6 +403,8 @@ if __name__ == '__main__':
run
(
0
,
n_gpus
,
args
,
devices
,
dataset
)
run
(
0
,
n_gpus
,
args
,
devices
,
dataset
)
# multi gpu
# multi gpu
else
:
else
:
# Create csr/coo/csc formats before launching training processes with multi-gpu.
# This avoids creating certain formats in each sub-process, which saves memory and CPU.
dataset
.
train_enc_graph
.
create_formats_
()
dataset
.
train_enc_graph
.
create_formats_
()
dataset
.
train_dec_graph
.
create_formats_
()
dataset
.
train_dec_graph
.
create_formats_
()
procs
=
[]
procs
=
[]
...
...
examples/pytorch/graphsage/train_cv_multi_gpu.py
View file @
1e3fcc7c
...
@@ -384,6 +384,8 @@ if __name__ == '__main__':
...
@@ -384,6 +384,8 @@ if __name__ == '__main__':
g
.
ndata
[
'features'
]
=
features
.
share_memory_
()
g
.
ndata
[
'features'
]
=
features
.
share_memory_
()
create_history_storage
(
g
,
args
,
n_classes
)
create_history_storage
(
g
,
args
,
n_classes
)
# Create csr/coo/csc formats before launching training processes with multi-gpu.
# This avoids creating certain formats in each sub-process, which saves memory and CPU.
g
.
create_formats_
()
g
.
create_formats_
()
# Pack data
# Pack data
data
=
train_mask
,
val_mask
,
in_feats
,
labels
,
n_classes
,
g
data
=
train_mask
,
val_mask
,
in_feats
,
labels
,
n_classes
,
g
...
...
examples/pytorch/graphsage/train_sampling.py
View file @
1e3fcc7c
...
@@ -229,6 +229,8 @@ if __name__ == '__main__':
...
@@ -229,6 +229,8 @@ if __name__ == '__main__':
else
:
else
:
train_g
=
val_g
=
test_g
=
g
train_g
=
val_g
=
test_g
=
g
# Create csr/coo/csc formats before launching training processes with multi-gpu.
# This avoids creating certain formats in each sub-process, which saves memory and CPU.
train_g
.
create_formats_
()
train_g
.
create_formats_
()
val_g
.
create_formats_
()
val_g
.
create_formats_
()
test_g
.
create_formats_
()
test_g
.
create_formats_
()
...
...
examples/pytorch/graphsage/train_sampling_multi_gpu.py
View file @
1e3fcc7c
...
@@ -258,6 +258,8 @@ if __name__ == '__main__':
...
@@ -258,6 +258,8 @@ if __name__ == '__main__':
else
:
else
:
train_g
=
val_g
=
test_g
=
g
train_g
=
val_g
=
test_g
=
g
# Create csr/coo/csc formats before launching training processes with multi-gpu.
# This avoids creating certain formats in each sub-process, which saves memory and CPU.
train_g
.
create_formats_
()
train_g
.
create_formats_
()
val_g
.
create_formats_
()
val_g
.
create_formats_
()
test_g
.
create_formats_
()
test_g
.
create_formats_
()
...
...
examples/pytorch/graphsage/train_sampling_unsupervised.py
View file @
1e3fcc7c
...
@@ -298,6 +298,9 @@ def main(args, devices):
...
@@ -298,6 +298,9 @@ def main(args, devices):
val_mask
=
g
.
ndata
[
'val_mask'
]
val_mask
=
g
.
ndata
[
'val_mask'
]
test_mask
=
g
.
ndata
[
'test_mask'
]
test_mask
=
g
.
ndata
[
'test_mask'
]
g
.
ndata
[
'features'
]
=
features
g
.
ndata
[
'features'
]
=
features
# Create csr/coo/csc formats before launching training processes with multi-gpu.
# This avoids creating certain formats in each sub-process, which saves memory and CPU.
g
.
create_formats_
()
g
.
create_formats_
()
# Pack data
# Pack data
data
=
train_mask
,
val_mask
,
test_mask
,
in_feats
,
labels
,
n_classes
,
g
data
=
train_mask
,
val_mask
,
test_mask
,
in_feats
,
labels
,
n_classes
,
g
...
...
examples/pytorch/ogb/cluster-sage/sampler.py
View file @
1e3fcc7c
...
@@ -63,9 +63,4 @@ class ClusterIter(object):
...
@@ -63,9 +63,4 @@ class ClusterIter(object):
def
subgraph_collate_fn
(
g
,
batch
):
def
subgraph_collate_fn
(
g
,
batch
):
nids
=
np
.
concatenate
(
batch
).
reshape
(
-
1
).
astype
(
np
.
int64
)
nids
=
np
.
concatenate
(
batch
).
reshape
(
-
1
).
astype
(
np
.
int64
)
g1
=
g
.
subgraph
(
nids
)
g1
=
g
.
subgraph
(
nids
)
nid
=
g1
.
ndata
[
dgl
.
NID
]
g1
.
ndata
[
'feat'
]
=
g
.
ndata
[
'feat'
][
nid
]
g1
.
ndata
[
'labels'
]
=
g
.
ndata
[
'labels'
][
nid
]
g1
.
ndata
[
'train_mask'
]
=
g
.
ndata
[
'train_mask'
][
nid
]
g1
.
create_formats_
()
return
g1
return
g1
examples/pytorch/ogb/ogbn-products/gat/main.py
View file @
1e3fcc7c
...
@@ -243,6 +243,9 @@ if __name__ == '__main__':
...
@@ -243,6 +243,9 @@ if __name__ == '__main__':
in_feats
=
graph
.
ndata
[
'feat'
].
shape
[
1
]
in_feats
=
graph
.
ndata
[
'feat'
].
shape
[
1
]
n_classes
=
(
labels
.
max
()
+
1
).
item
()
n_classes
=
(
labels
.
max
()
+
1
).
item
()
# Create csr/coo/csc formats before launching sampling processes
# This avoids creating certain formats in each data loader process, which saves memory and CPU.
graph
.
create_formats_
()
graph
.
create_formats_
()
# Pack data
# Pack data
data
=
train_idx
,
val_idx
,
test_idx
,
in_feats
,
labels
,
n_classes
,
graph
,
args
.
head
data
=
train_idx
,
val_idx
,
test_idx
,
in_feats
,
labels
,
n_classes
,
graph
,
args
.
head
...
...
examples/pytorch/ogb/ogbn-products/graphsage/main.py
View file @
1e3fcc7c
...
@@ -234,6 +234,8 @@ if __name__ == '__main__':
...
@@ -234,6 +234,8 @@ if __name__ == '__main__':
in_feats
=
graph
.
ndata
[
'feat'
].
shape
[
1
]
in_feats
=
graph
.
ndata
[
'feat'
].
shape
[
1
]
n_classes
=
(
labels
.
max
()
+
1
).
item
()
n_classes
=
(
labels
.
max
()
+
1
).
item
()
# Create csr/coo/csc formats before launching sampling processes
# This avoids creating certain formats in each data loader process, which saves memory and CPU.
graph
.
create_formats_
()
graph
.
create_formats_
()
# Pack data
# Pack data
data
=
train_idx
,
val_idx
,
test_idx
,
in_feats
,
labels
,
n_classes
,
graph
data
=
train_idx
,
val_idx
,
test_idx
,
in_feats
,
labels
,
n_classes
,
graph
...
...
examples/pytorch/rgcn/entity_classify_mp.py
View file @
1e3fcc7c
...
@@ -504,6 +504,9 @@ def main(args, devices):
...
@@ -504,6 +504,9 @@ def main(args, devices):
train_idx
.
share_memory_
()
train_idx
.
share_memory_
()
val_idx
.
share_memory_
()
val_idx
.
share_memory_
()
test_idx
.
share_memory_
()
test_idx
.
share_memory_
()
# Create csr/coo/csc formats before launching training processes with multi-gpu.
# This avoids creating certain formats in each sub-process, which saves memory and CPU.
g
.
create_formats_
()
n_gpus
=
len
(
devices
)
n_gpus
=
len
(
devices
)
# cpu
# cpu
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment