"git@developer.sourcefind.cn:change/sglang.git" did not exist on "a8552cb18b452e9a0a7e421651caf9d3c4deb673"
Unverified Commit 8da604d7 authored by Muhammed Fatih BALIN's avatar Muhammed Fatih BALIN Committed by GitHub
Browse files

[GraphBolt][CUDA] Refine pinning tests (#6994)

parent 3df4f8cb
......@@ -1623,6 +1623,8 @@ def test_sample_neighbors_homo(labor, is_pinned):
0 1 0 0 1
1 0 0 0 1
"""
if F._default_context_str == "cpu" and is_pinned:
pytest.skip("Pinning is not meaningful without a GPU.")
# Initialize data.
total_num_edges = 12
indptr = torch.LongTensor([0, 3, 5, 7, 9, 12])
......@@ -1631,12 +1633,9 @@ def test_sample_neighbors_homo(labor, is_pinned):
assert indptr[-1] == len(indices)
# Construct FusedCSCSamplingGraph.
graph = gb.fused_csc_sampling_graph(indptr, indices)
if F._default_context_str == "gpu":
if is_pinned:
graph.pin_memory_()
else:
graph = graph.to(F.ctx())
graph = gb.fused_csc_sampling_graph(indptr, indices).to(
"pinned" if is_pinned else F.ctx()
)
# Generate subgraph via sample neighbors.
nodes = torch.LongTensor([1, 3, 4]).to(F.ctx())
......@@ -1883,6 +1882,8 @@ def test_sample_neighbors_return_eids_homo(labor, is_pinned):
0 1 0 0 1
1 0 0 0 1
"""
if F._default_context_str == "cpu" and is_pinned:
pytest.skip("Pinning is not meaningful without a GPU.")
# Initialize data.
total_num_edges = 12
indptr = torch.LongTensor([0, 3, 5, 7, 9, 12])
......@@ -1896,12 +1897,7 @@ def test_sample_neighbors_return_eids_homo(labor, is_pinned):
# Construct FusedCSCSamplingGraph.
graph = gb.fused_csc_sampling_graph(
indptr, indices, edge_attributes=edge_attributes
)
if F._default_context_str == "gpu":
if is_pinned:
graph.pin_memory_()
else:
graph = graph.to(F.ctx())
).to("pinned" if is_pinned else F.ctx())
# Generate subgraph via sample neighbors.
nodes = torch.LongTensor([1, 3, 4]).to(F.ctx())
......
......@@ -207,7 +207,8 @@ def test_feature_store_to_device(device):
)
@pytest.mark.parametrize("idtype", [torch.int32, torch.int64])
@pytest.mark.parametrize("shape", [(2, 1), (2, 3), (2, 2, 2), (137, 13, 3)])
def test_torch_based_pinned_feature(dtype, idtype, shape):
@pytest.mark.parametrize("in_place", [False, True])
def test_torch_based_pinned_feature(dtype, idtype, shape, in_place):
if dtype == torch.complex128:
tensor = torch.complex(
torch.randint(0, 13, shape, dtype=torch.float64),
......@@ -219,10 +220,13 @@ def test_torch_based_pinned_feature(dtype, idtype, shape):
test_tensor_cuda = test_tensor.cuda()
feature = gb.TorchBasedFeature(tensor)
if in_place:
feature.pin_memory_()
# Check if pinning is truly in-place.
assert feature._tensor.data_ptr() == tensor.data_ptr()
else:
feature = feature.to("pinned")
# Test read entire pinned feature, the result should be on cuda.
assert torch.equal(feature.read(), test_tensor_cuda)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment