OpenDAS / dgl · Commits

Commit 8da604d7 (unverified)
Authored Jan 23, 2024 by Muhammed Fatih BALIN, committed by GitHub on Jan 23, 2024
Parent: 3df4f8cb

[GraphBolt][CUDA] Refine pinning tests (#6994)
Showing 2 changed files with 16 additions and 16 deletions (+16 / -16):

tests/python/pytorch/graphbolt/impl/test_fused_csc_sampling_graph.py   +8 / -12
tests/python/pytorch/graphbolt/impl/test_torch_based_feature_store.py  +8 / -4
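In short, the diff below tightens how the GraphBolt tests exercise pinned (page-locked) host memory: the pinned variants are skipped when only a CPU backend is available, and pinning is requested either in place via `pin_memory_()` or as a copy via `.to("pinned")`. As background, here is a minimal plain-PyTorch sketch of what pinned memory means; it uses only standard `torch` APIs and is not part of the commit:

```python
import torch

# Pinned (page-locked) host memory enables asynchronous, faster host-to-GPU
# copies. This only illustrates the concept; the GraphBolt objects in the
# diff expose their own pinning helpers on top of it.
if torch.cuda.is_available():
    cpu_tensor = torch.arange(12)
    pinned = cpu_tensor.pin_memory()   # returns a pinned copy
    assert pinned.is_pinned()
    assert not cpu_tensor.is_pinned()  # the original stays pageable
    # Pinned tensors can be copied to the GPU without blocking the host.
    gpu_tensor = pinned.cuda(non_blocking=True)
    assert torch.equal(gpu_tensor.cpu(), cpu_tensor)
```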
tests/python/pytorch/graphbolt/impl/test_fused_csc_sampling_graph.py (view file @ 8da604d7)
@@ -1623,6 +1623,8 @@ def test_sample_neighbors_homo(labor, is_pinned):
     0 1 0 0 1
     1 0 0 0 1
     """
+    if F._default_context_str == "cpu" and is_pinned:
+        pytest.skip("Pinning is not meaningful without a GPU.")
     # Initialize data.
     total_num_edges = 12
     indptr = torch.LongTensor([0, 3, 5, 7, 9, 12])
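The added guard interacts with the `is_pinned` parametrization: on a CPU-only backend, only the `is_pinned=True` case is skipped at runtime, and the remaining cases still execute. A small self-contained sketch of that pattern (the `HAS_GPU` flag is a stand-in for the `F._default_context_str` check, not part of the commit):

```python
import pytest
import torch

# Stand-in for the DGL backend check; in the real test this is
# F._default_context_str == "cpu".
HAS_GPU = torch.cuda.is_available()

@pytest.mark.parametrize("is_pinned", [False, True])
def test_something_with_optional_pinning(is_pinned):
    if not HAS_GPU and is_pinned:
        # Skipping at runtime marks only this parametrized case as skipped;
        # the is_pinned=False case still runs on CPU-only machines.
        pytest.skip("Pinning is not meaningful without a GPU.")
    tensor = torch.ones(3)
    if is_pinned:
        tensor = tensor.pin_memory()
    assert tensor.sum() == 3
```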
@@ -1631,12 +1633,9 @@ def test_sample_neighbors_homo(labor, is_pinned):
     assert indptr[-1] == len(indices)

     # Construct FusedCSCSamplingGraph.
-    graph = gb.fused_csc_sampling_graph(indptr, indices)
-    if F._default_context_str == "gpu":
-        if is_pinned:
-            graph.pin_memory_()
-        else:
-            graph = graph.to(F.ctx())
+    graph = gb.fused_csc_sampling_graph(indptr, indices).to(
+        "pinned" if is_pinned else F.ctx()
+    )

     # Generate subgraph via sample neighbors.
     nodes = torch.LongTensor([1, 3, 4]).to(F.ctx())
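The refactor collapses the nested device/pinning branches into a single `.to()` call that accepts the string `"pinned"` as a target, as the new test code shows. A small illustrative sketch of the same dispatch idea on a plain container (the `ToyGraph` class is hypothetical, not a GraphBolt API; the string-`"pinned"` convention follows the diff above):

```python
import torch

class ToyGraph:
    """Hypothetical stand-in for a graph object holding CSC tensors."""

    def __init__(self, indptr: torch.Tensor, indices: torch.Tensor):
        self.indptr = indptr
        self.indices = indices

    def to(self, device):
        # One entry point covers all the cases the old test code handled
        # with nested ifs: stay on CPU, move to a device, or pin.
        if isinstance(device, str) and device == "pinned":
            return ToyGraph(self.indptr.pin_memory(), self.indices.pin_memory())
        return ToyGraph(self.indptr.to(device), self.indices.to(device))


indptr = torch.LongTensor([0, 3, 5, 7, 9, 12])
indices = torch.arange(12)
is_pinned = torch.cuda.is_available()
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Mirrors graph.to("pinned" if is_pinned else F.ctx()) from the diff.
graph = ToyGraph(indptr, indices).to("pinned" if is_pinned else device)
```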
@@ -1883,6 +1882,8 @@ def test_sample_neighbors_return_eids_homo(labor, is_pinned):
     0 1 0 0 1
     1 0 0 0 1
     """
+    if F._default_context_str == "cpu" and is_pinned:
+        pytest.skip("Pinning is not meaningful without a GPU.")
     # Initialize data.
     total_num_edges = 12
     indptr = torch.LongTensor([0, 3, 5, 7, 9, 12])
@@ -1896,12 +1897,7 @@ def test_sample_neighbors_return_eids_homo(labor, is_pinned):
     # Construct FusedCSCSamplingGraph.
     graph = gb.fused_csc_sampling_graph(
         indptr, indices, edge_attributes=edge_attributes
-    )
-    if F._default_context_str == "gpu":
-        if is_pinned:
-            graph.pin_memory_()
-        else:
-            graph = graph.to(F.ctx())
+    ).to("pinned" if is_pinned else F.ctx())

     # Generate subgraph via sample neighbors.
     nodes = torch.LongTensor([1, 3, 4]).to(F.ctx())
tests/python/pytorch/graphbolt/impl/test_torch_based_feature_store.py (view file @ 8da604d7)
@@ -207,7 +207,8 @@ def test_feature_store_to_device(device):
 )
 @pytest.mark.parametrize("idtype", [torch.int32, torch.int64])
 @pytest.mark.parametrize("shape", [(2, 1), (2, 3), (2, 2, 2), (137, 13, 3)])
-def test_torch_based_pinned_feature(dtype, idtype, shape):
+@pytest.mark.parametrize("in_place", [False, True])
+def test_torch_based_pinned_feature(dtype, idtype, shape, in_place):
     if dtype == torch.complex128:
         tensor = torch.complex(
             torch.randint(0, 13, shape, dtype=torch.float64),
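Stacking `@pytest.mark.parametrize` decorators, as done here, generates the Cartesian product of the parameter lists, so the new `in_place` flag doubles the number of generated cases rather than replacing any. A tiny self-contained illustration (a hypothetical test, not from the commit):

```python
import pytest

@pytest.mark.parametrize("idtype", ["int32", "int64"])
@pytest.mark.parametrize("in_place", [False, True])
def test_parametrize_product(idtype, in_place):
    # pytest expands this into 2 * 2 = 4 test cases:
    # (int32, False), (int32, True), (int64, False), (int64, True).
    assert idtype in ("int32", "int64")
    assert in_place in (False, True)
```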
@@ -219,10 +220,13 @@ def test_torch_based_pinned_feature(dtype, idtype, shape):
     test_tensor_cuda = test_tensor.cuda()

     feature = gb.TorchBasedFeature(tensor)
-    feature.pin_memory_()
-    # Check if pinning is truly in-place.
-    assert feature._tensor.data_ptr() == tensor.data_ptr()
+    if in_place:
+        feature.pin_memory_()
+        # Check if pinning is truly in-place.
+        assert feature._tensor.data_ptr() == tensor.data_ptr()
+    else:
+        feature = feature.to("pinned")

     # Test read entire pinned feature, the result should be on cuda.
     assert torch.equal(feature.read(), test_tensor_cuda)
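The new `in_place` parameter exercises both pinning paths: `pin_memory_()` pins the underlying storage without reallocating (hence the `data_ptr()` identity check), while `.to("pinned")` produces a pinned copy of the feature. The distinction can be illustrated with plain `torch` tensors, whose `pin_memory()` is the copying variant; this sketch is an analogy, not the GraphBolt API, and torch tensors have no in-place `pin_memory_`:

```python
import torch

if torch.cuda.is_available():
    tensor = torch.randint(0, 13, (2, 3))

    # Copying path: torch.Tensor.pin_memory() returns a pinned copy, so the
    # storage address changes, much like feature.to("pinned") in the test.
    pinned_copy = tensor.pin_memory()
    assert pinned_copy.is_pinned()
    assert pinned_copy.data_ptr() != tensor.data_ptr()

    # An in-place pin (as GraphBolt's pin_memory_() provides) would instead
    # keep the same data_ptr(); the test asserts exactly that:
    #     assert feature._tensor.data_ptr() == tensor.data_ptr()

    # Either way, reads can be served on the GPU afterwards.
    assert torch.equal(pinned_copy.cuda(), tensor.cuda())
```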