OpenDAS / dgl / Commits

Commit a2e1c796 (unverified), parent 4ee0a8bd
Authored Feb 05, 2024 by Muhammed Fatih BALIN; committed by GitHub on Feb 05, 2024

[GraphBolt][CUDA] Pipelined sampling accuracy fix (#7088)
Changes: 2 changed files with 25 additions and 10 deletions (+25 -10)

  python/dgl/graphbolt/impl/neighbor_sampler.py                   +19 -8
  tests/python/pytorch/graphbolt/impl/test_neighbor_sampler.py    +6  -2
python/dgl/graphbolt/impl/neighbor_sampler.py

@@ -48,22 +48,33 @@ class FetchInsubgraphData(Mapper):
         with torch.cuda.stream(self.stream):
             index = minibatch._seed_nodes
             if isinstance(index, dict):
+                for idx in index.values():
+                    idx.record_stream(torch.cuda.current_stream())
                 index = self.graph._convert_to_homogeneous_nodes(index)
+            else:
+                index.record_stream(torch.cuda.current_stream())
 
+            def record_stream(tensor):
+                if stream is not None and tensor.is_cuda:
+                    tensor.record_stream(stream)
+                return tensor
 
-            index, original_positions = index.sort()
-            if (original_positions.diff() == 1).all().item():  # is_sorted
+            if self.graph.node_type_offset is None:
+                # sorting not needed.
                 minibatch._subgraph_seed_nodes = None
             else:
-                minibatch._subgraph_seed_nodes = original_positions
+                index, original_positions = index.sort()
+                index.record_stream(torch.cuda.current_stream())
+                if (original_positions.diff() == 1).all().item():
+                    # already sorted.
+                    minibatch._subgraph_seed_nodes = None
+                else:
+                    minibatch._subgraph_seed_nodes = record_stream(
+                        original_positions.sort()[1]
+                    )
             index_select_csc_with_indptr = partial(
                 torch.ops.graphbolt.index_select_csc, self.graph.csc_indptr
             )
 
-            def record_stream(tensor):
-                if stream is not None and tensor.is_cuda:
-                    tensor.record_stream(stream)
-
             indptr, indices = index_select_csc_with_indptr(
                 self.graph.indices, index, None
             )
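For context on the accuracy part of this change: index.sort() returns the sorted seed nodes together with original_positions, the positions the sorted values had in the original tensor. Mapping results computed in sorted order back to the caller's order requires the inverse of that permutation, which is what original_positions.sort()[1] produces; the old code stored original_positions directly. A minimal standalone sketch of the two permutations (plain PyTorch; the tensor values are illustrative and not taken from the patch):

    import torch

    index = torch.tensor([2, 0, 1])          # seed nodes in the caller's order
    sorted_index, original_positions = index.sort()
    # sorted_index == index[original_positions] == tensor([0, 1, 2])

    # Applying original_positions again does NOT restore the original order:
    print(sorted_index[original_positions])   # tensor([1, 2, 0])

    # The inverse permutation, as stored by the fixed code, does:
    inverse = original_positions.sort()[1]
    print(sorted_index[inverse])              # tensor([2, 0, 1])

The added record_stream(torch.cuda.current_stream()) calls address the pipelined, multi-stream side of the fix: a tensor produced on one CUDA stream and consumed on another should be recorded on the consuming stream so that PyTorch's caching allocator does not reuse its memory before the work queued on that stream has finished.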
tests/python/pytorch/graphbolt/impl/test_neighbor_sampler.py

@@ -41,8 +41,12 @@ def get_hetero_graph():
 @unittest.skipIf(F._default_context_str != "gpu", reason="Enabled only on GPU.")
 @pytest.mark.parametrize("hetero", [False, True])
 @pytest.mark.parametrize("prob_name", [None, "weight", "mask"])
-def test_NeighborSampler_GraphFetch(hetero, prob_name):
-    items = torch.arange(3)
+@pytest.mark.parametrize("sorted", [False, True])
+def test_NeighborSampler_GraphFetch(hetero, prob_name, sorted):
+    if sorted:
+        items = torch.arange(3)
+    else:
+        items = torch.tensor([2, 0, 1])
     names = "seed_nodes"
     itemset = gb.ItemSet(items, names=names)
     graph = get_hetero_graph().to(F.ctx())
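The new "sorted" parametrization matters because torch.arange(3) is already sorted, so the sampler's is-sorted shortcut hides the permutation logic, while torch.tensor([2, 0, 1]) forces the sort-and-restore path that the fix corrects. A small sketch mirroring the is-sorted check that appears in the sampler diff above, showing which branch each parametrized input exercises (illustrative only, not part of the test file):

    import torch

    for items in (torch.arange(3), torch.tensor([2, 0, 1])):
        _, positions = items.sort()
        already_sorted = bool((positions.diff() == 1).all())
        # torch.arange(3)         -> already_sorted is True  (no reordering stored)
        # torch.tensor([2, 0, 1]) -> already_sorted is False (inverse permutation stored)
        print(items.tolist(), already_sorted)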