Unverified Commit d67dae1d authored by Ramon Zhou's avatar Ramon Zhou Committed by GitHub
Browse files

[GraphBolt] Modify multigpu example arguments (#6990)

parent 99a02f07
...@@ -134,11 +134,11 @@ def create_dataloader( ...@@ -134,11 +134,11 @@ def create_dataloader(
# [Output]: # [Output]:
# A CopyTo object copying data in the datapipe to a specified device.\ # A CopyTo object copying data in the datapipe to a specified device.\
############################################################################ ############################################################################
if not args.cpu_sampling: if args.storage_device != "cpu":
datapipe = datapipe.copy_to(device, extra_attrs=["seed_nodes"]) datapipe = datapipe.copy_to(device, extra_attrs=["seed_nodes"])
datapipe = datapipe.sample_neighbor(graph, args.fanout) datapipe = datapipe.sample_neighbor(graph, args.fanout)
datapipe = datapipe.fetch_feature(features, node_feature_keys=["feat"]) datapipe = datapipe.fetch_feature(features, node_feature_keys=["feat"])
if args.cpu_sampling: if args.storage_device == "cpu":
datapipe = datapipe.copy_to(device) datapipe = datapipe.copy_to(device)
dataloader = gb.DataLoader(datapipe, args.num_workers) dataloader = gb.DataLoader(datapipe, args.num_workers)
...@@ -276,7 +276,7 @@ def run(rank, world_size, args, devices, dataset): ...@@ -276,7 +276,7 @@ def run(rank, world_size, args, devices, dataset):
) )
# Pin the graph and features to enable GPU access. # Pin the graph and features to enable GPU access.
if not args.cpu_sampling: if args.storage_device == "pinned":
dataset.graph.pin_memory_() dataset.graph.pin_memory_()
dataset.feature.pin_memory_() dataset.feature.pin_memory_()
...@@ -388,15 +388,17 @@ def parse_args(): ...@@ -388,15 +388,17 @@ def parse_args():
type=str, type=str,
default="10,10,10", default="10,10,10",
help="Fan-out of neighbor sampling. It is IMPORTANT to keep len(fanout)" help="Fan-out of neighbor sampling. It is IMPORTANT to keep len(fanout)"
" identical with the number of layers in your model. Default: 15,10,5", " identical with the number of layers in your model. Default: 10,10,10",
) )
parser.add_argument( parser.add_argument(
"--num-workers", type=int, default=0, help="The number of processes." "--num-workers", type=int, default=0, help="The number of processes."
) )
parser.add_argument( parser.add_argument(
"--cpu-sampling", "--mode",
action="store_true", default="pinned-cuda",
help="Disables GPU sampling and utilizes the CPU for dataloading.", choices=["cpu-cuda", "pinned-cuda"],
help="Dataset storage placement and Train device: 'cpu' for CPU and RAM,"
" 'pinned' for pinned memory in RAM, 'cuda' for GPU and GPU memory.",
) )
return parser.parse_args() return parser.parse_args()
...@@ -406,6 +408,7 @@ if __name__ == "__main__": ...@@ -406,6 +408,7 @@ if __name__ == "__main__":
if not torch.cuda.is_available(): if not torch.cuda.is_available():
print(f"Multi-gpu training needs to be in gpu mode.") print(f"Multi-gpu training needs to be in gpu mode.")
exit(0) exit(0)
args.storage_device, _ = args.mode.split("-")
devices = list(map(int, args.gpu.split(","))) devices = list(map(int, args.gpu.split(",")))
world_size = len(devices) world_size = len(devices)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment