OpenDAS / dgl
"tests/python/common/test_heterograph-remove.py" did not exist on "ff519f98c317eaffc1f753ad8fb28f4c4280596e"
Commit 15695ed0 (unverified), authored Feb 03, 2024 by Muhammed Fatih BALIN; committed by GitHub, Feb 02, 2024.
[GraphBolt][CUDA] Handle edge case of %100 cache hit rate. (#7080)
Parent: 0a42d863
Showing 2 changed files, with 23 additions and 3 deletions:
graphbolt/src/cuda/gpu_cache.cu (+1, -0)
tests/python/pytorch/graphbolt/impl/test_gpu_cached_feature.py (+22, -3)
graphbolt/src/cuda/gpu_cache.cu
@@ -78,6 +78,7 @@ void GpuCache::Replace(torch::Tensor keys, torch::Tensor values) {
       "Values should have the correct dimensions.");
   TORCH_CHECK(
       values.scalar_type() == dtype_, "Values should have the correct dtype.");
+  if (keys.numel() == 0) return;
   keys = keys.to(torch::kLong);
   torch::Tensor float_values;
   if (num_bytes_ % sizeof(float) != 0) {
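The whole fix is the single added line: `GpuCache::Replace` now returns early when `keys` is empty, which is exactly the situation at a 100% cache hit rate, where the lookup leaves no missing keys to insert. A minimal sketch of the same guard in Python, using a plain dict as a hypothetical stand-in for the CUDA cache (`replace` and `store` here are illustrative, not the real API):

```python
import torch

def replace(keys: torch.Tensor, values: torch.Tensor, store: dict) -> None:
    # Toy analogue of GpuCache::Replace; `store` is a hypothetical stand-in
    # for the CUDA cache. Mirrors the committed guard: return before doing
    # any work when there is nothing to insert (the 100% hit-rate case).
    if keys.numel() == 0:
        return
    keys = keys.to(torch.long)
    for k, v in zip(keys.tolist(), values):
        store[k] = v.clone()

store: dict = {}
replace(torch.empty(0, dtype=torch.long), torch.empty(0, 3), store)  # no-op
replace(torch.tensor([0, 1]), torch.ones(2, 3), store)  # inserts two rows
assert len(store) == 2
```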
tests/python/pytorch/graphbolt/impl/test_gpu_cached_feature.py
@@ -28,14 +28,16 @@ from dgl import graphbolt as gb
         torch.float64,
     ],
 )
-def test_gpu_cached_feature(dtype):
+@pytest.mark.parametrize("cache_size_a", [1, 1024])
+@pytest.mark.parametrize("cache_size_b", [1, 1024])
+def test_gpu_cached_feature(dtype, cache_size_a, cache_size_b):
     a = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dtype, pin_memory=True)
     b = torch.tensor(
         [[[1, 2], [3, 4]], [[4, 5], [6, 7]]], dtype=dtype, pin_memory=True
     )
-    feat_store_a = gb.GPUCachedFeature(gb.TorchBasedFeature(a), 2)
-    feat_store_b = gb.GPUCachedFeature(gb.TorchBasedFeature(b), 1)
+    feat_store_a = gb.GPUCachedFeature(gb.TorchBasedFeature(a), cache_size_a)
+    feat_store_b = gb.GPUCachedFeature(gb.TorchBasedFeature(b), cache_size_b)

     # Test read the entire feature.
     assert torch.equal(feat_store_a.read(), a.to("cuda"))
@@ -52,6 +54,23 @@ def test_gpu_cached_feature(dtype):
             "cuda"
         ),
     )
+    assert torch.equal(
+        feat_store_a.read(torch.tensor([1, 1]).to("cuda")),
+        torch.tensor([[4, 5, 6], [4, 5, 6]], dtype=dtype).to("cuda"),
+    )
+    assert torch.equal(
+        feat_store_b.read(torch.tensor([0]).to("cuda")),
+        torch.tensor([[[1, 2], [3, 4]]], dtype=dtype).to("cuda"),
+    )
+    # The cache should be full now for the large cache sizes, %100 hit expected.
+    if cache_size_a >= 1024:
+        total_miss = feat_store_a._feature.total_miss
+        feat_store_a.read(torch.tensor([0, 1]).to("cuda"))
+        assert total_miss == feat_store_a._feature.total_miss
+    if cache_size_b >= 1024:
+        total_miss = feat_store_b._feature.total_miss
+        feat_store_b.read(torch.tensor([0, 1]).to("cuda"))
+        assert total_miss == feat_store_b._feature.total_miss
     # Test get the size of the entire feature with ids.
     assert feat_store_a.size() == torch.Size([3])
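The new `if` branches check that once a cache is large enough to hold every row (the 1024-slot parametrizations), re-reading already-seen ids produces no additional misses. A hedged usage sketch distilled from the test, assuming a CUDA-enabled DGL build; `_feature.total_miss` is the internal counter the test itself inspects:

```python
import torch
from dgl import graphbolt as gb

# Sketch under the same assumptions as the test: a 1024-slot cache easily
# holds the 2-row feature, so the second read of the same ids should be
# served entirely from the cache, leaving the miss counter unchanged.
a = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float32, pin_memory=True)
feat = gb.GPUCachedFeature(gb.TorchBasedFeature(a), 1024)

ids = torch.tensor([0, 1]).to("cuda")
feat.read(ids)                             # first read populates the cache
misses = feat._feature.total_miss
feat.read(ids)                             # 100% hit rate expected here
assert misses == feat._feature.total_miss
```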