"docs/source/api/python/sampler.rst" did not exist on "57daf9c9a6728d15fcc3ad66a9b5616162d9a37b"
2_node_classification.py 11.1 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
"""
Single Machine Multi-GPU Minibatch Node Classification
======================================================

In this tutorial, you will learn how to use multiple GPUs in training a
graph neural network (GNN) for node classification.

This tutorial assumes that you have read the `Stochastic GNN Training for Node
Classification in DGL <../../notebooks/stochastic_training/node_classification.ipynb>`__
tutorial. It also assumes that you know the basics of multi-GPU training with
``DistributedDataParallel``.

.. note::

   See `this tutorial <https://pytorch.org/tutorials/intermediate/ddp_tutorial.html>`__
   from PyTorch for general multi-GPU training with ``DistributedDataParallel``.  Also,
   see the first section of :doc:`the multi-GPU graph classification
   tutorial <1_graph_classification>`
   for an overview of using ``DistributedDataParallel`` with DGL.

"""


######################################################################
# Importing Packages
# ------------------
#
# We use ``torch.distributed`` to initialize the distributed training context
# and ``torch.multiprocessing`` to spawn one process per GPU.
#

import os

os.environ["DGLBACKEND"] = "pytorch"
import time

import dgl.graphbolt as gb
import dgl.nn as dglnn
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torchmetrics.functional as MF
import tqdm
from torch.distributed.algorithms.join import Join
from torch.nn.parallel import DistributedDataParallel as DDP


######################################################################
# Defining Model
# --------------
#
# The model is again identical to the one in `Stochastic GNN Training for Node
# Classification in DGL <../../notebooks/stochastic_training/node_classification.ipynb>`__.
#


class SAGE(nn.Module):
    def __init__(self, in_size, hidden_size, out_size):
        super().__init__()
        self.layers = nn.ModuleList()
        # Three-layer GraphSAGE-mean.
        self.layers.append(dglnn.SAGEConv(in_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, hidden_size, "mean"))
        self.layers.append(dglnn.SAGEConv(hidden_size, out_size, "mean"))
        self.dropout = nn.Dropout(0.5)
        self.hidden_size = hidden_size
        self.out_size = out_size
        # Set the dtype for the layers manually.
        self.float()

    def forward(self, blocks, x):
        hidden_x = x
        for layer_idx, (layer, block) in enumerate(zip(self.layers, blocks)):
            hidden_x = layer(block, hidden_x)
            is_last_layer = layer_idx == len(self.layers) - 1
            if not is_last_layer:
                hidden_x = F.relu(hidden_x)
                hidden_x = self.dropout(hidden_x)
        return hidden_x


######################################################################
# Mini-batch Data Loading
# -----------------------
#
# The major difference from the previous tutorial is that we will use
# ``DistributedItemSampler`` instead of ``ItemSampler`` to sample mini-batches
# of nodes.  ``DistributedItemSampler`` is a distributed version of
# ``ItemSampler`` that works with ``DistributedDataParallel``.  It is
# implemented as a wrapper around ``ItemSampler``, and it assigns each
# replica a disjoint subset of the items so that no minibatch is processed
# twice.  It also supports dropping the last non-full minibatch, and
# dropping minibatches so that every replica sees the same number of them
# (``drop_uneven_inputs``), avoiding the need for padding.
#
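#
# For intuition, here is a minimal sketch of the partitioning behavior
# (hypothetical toy sizes; ``DistributedItemSampler`` reads the rank and
# world size from the default process group, which ``run`` below
# initializes):
#
# .. code:: python
#
#    items = gb.ItemSet(torch.arange(16), names="seed_nodes")
#    sampler = gb.DistributedItemSampler(
#        item_set=items,
#        batch_size=4,
#        shuffle=False,
#        drop_last=False,
#        drop_uneven_inputs=False,
#    )
#    # With world_size=2, each rank iterates over a disjoint half of the
#    # 16 items, i.e. two minibatches of 4 items per replica.
#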


def create_dataloader(
    graph,
    features,
    itemset,
    device,
    is_train,
):
    datapipe = gb.DistributedItemSampler(
        item_set=itemset,
        batch_size=1024,
        drop_last=is_train,
        shuffle=is_train,
        drop_uneven_inputs=is_train,
    )
    datapipe = datapipe.copy_to(device, extra_attrs=["seed_nodes"])
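    # The extra_attrs=["seed_nodes"] argument asks copy_to to also move the
    # seed_nodes attribute of each minibatch to the device.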
    # Because the minibatch has been moved to the device, the sample_neighbor
    # and fetch_feature steps below will be executed on the GPU.
    datapipe = datapipe.sample_neighbor(graph, [10, 10, 10])
    datapipe = datapipe.fetch_feature(features, node_feature_keys=["feat"])
    return gb.DataLoader(datapipe)


def weighted_reduce(tensor, weight, dst=0):
    ########################################################################
    # (HIGHLIGHT) Collect accuracy and loss values from sub-processes and
    # obtain overall average values.
    #
    # `torch.distributed.reduce` is used to reduce tensors from all the
    # sub-processes to a specified process; ReduceOp.SUM is used by default.
    #
    # Because the GPUs may have differing numbers of processed items, we
    # perform a weighted mean to calculate the exact loss and accuracy.
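    #
    # For example (hypothetical numbers): if rank 0 processed 1,000 items
    # with summed loss 500.0 and rank 1 processed 500 items with summed
    # loss 400.0, the two reduces yield 900.0 and 1,500 on the destination
    # rank, giving the exact overall mean loss 900.0 / 1500 = 0.6, whereas
    # naively averaging the per-rank means (0.5 and 0.8) would give 0.65.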
    ########################################################################
    dist.reduce(tensor=tensor, dst=dst)
    weight = torch.tensor(weight, device=tensor.device)
    dist.reduce(tensor=weight, dst=dst)
    return tensor / weight


######################################################################
# Evaluation Loop
# ---------------
#
# The evaluation loop is almost identical to the previous tutorial.
#


@torch.no_grad()
def evaluate(rank, model, graph, features, itemset, num_classes, device):
    model.eval()
    y = []
    y_hats = []
    dataloader = create_dataloader(
        graph,
        features,
        itemset,
        device,
        is_train=False,
    )

    for data in tqdm.tqdm(dataloader) if rank == 0 else dataloader:
        blocks = data.blocks
        x = data.node_features["feat"]
        y.append(data.labels)
        y_hats.append(model.module(blocks, x))

    res = MF.accuracy(
        torch.cat(y_hats),
        torch.cat(y),
        task="multiclass",
        num_classes=num_classes,
    )

    return res.to(device), sum(y_i.size(0) for y_i in y)


######################################################################
# Training Loop
# -------------
#
# The training loop is also almost identical to the previous tutorial, except
# that we use the Join context manager to handle uneven inputs. The mechanics
# of Distributed Data Parallel (DDP) training in PyTorch require the number
# of inputs to be the same across all ranks; otherwise the program may error
# out or hang. To solve this, PyTorch provides the Join context manager.
# Please refer to
# `this tutorial <https://pytorch.org/tutorials/advanced/generic_join.html>`__
# for detailed information.
#
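#
# To make the failure mode concrete, here is a minimal sketch (hypothetical
# batch counts; ``model`` is a ``DistributedDataParallel`` replica like the
# one built in ``run`` below):
#
# .. code:: python
#
#    # Suppose rank 0 receives 3 minibatches but rank 1 only 2.  Without
#    # ``Join``, rank 1 would stop participating in gradient allreduces
#    # after its second batch and rank 0 would hang waiting for it.
#    # ``Join`` shadows the missing collectives so both ranks finish.
#    with Join([model]):
#        for minibatch in dataloader:  # per-rank length may differ
#            loss = F.cross_entropy(
#                model(minibatch.blocks, minibatch.node_features["feat"]),
#                minibatch.labels,
#            )
#            optimizer.zero_grad()
#            loss.backward()
#            optimizer.step()
#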


def train(
    rank,
    graph,
    features,
    train_set,
    valid_set,
    num_classes,
    model,
    device,
):
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    # Create training data loader.
    dataloader = create_dataloader(
        graph,
        features,
        train_set,
        device,
        is_train=True,
    )

    for epoch in range(5):
        epoch_start = time.time()

        model.train()
        total_loss = torch.tensor(0, dtype=torch.float, device=device)
        num_train_items = 0
        with Join([model]):
            for data in tqdm.tqdm(dataloader) if rank == 0 else dataloader:
                # The input features are from the source nodes in the first
                # layer's computation graph.
                x = data.node_features["feat"]

                # The ground truth labels are from the destination nodes
                # in the last layer's computation graph.
                y = data.labels

                blocks = data.blocks

                y_hat = model(blocks, x)

                # Compute loss.
                loss = F.cross_entropy(y_hat, y)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                total_loss += loss.detach() * y.size(0)
                num_train_items += y.size(0)

        # Evaluate the model.
        if rank == 0:
            print("Validating...")
        acc, num_val_items = evaluate(
            rank,
            model,
            graph,
            features,
            valid_set,
            num_classes,
            device,
        )
        total_loss = weighted_reduce(total_loss, num_train_items)
        acc = weighted_reduce(acc * num_val_items, num_val_items)

        # We synchronize before measuring the epoch time.
        torch.cuda.synchronize()
        epoch_end = time.time()
        if rank == 0:
            print(
                f"Epoch {epoch:05d} | "
                f"Average Loss {total_loss.item():.4f} | "
                f"Accuracy {acc.item():.4f} | "
                f"Time {epoch_end - epoch_start:.4f}"
            )


######################################################################
# Defining Training and Evaluation Procedures
# -------------------------------------------
#
# The following code defines the main function for each process. It is
# similar to the previous tutorial except that we need to initialize a
# distributed training context with ``torch.distributed`` and wrap the model
# with ``torch.nn.parallel.DistributedDataParallel``.
#


def run(rank, world_size, devices, dataset):
    # Set up multiprocessing environment.
    device = devices[rank]
    torch.cuda.set_device(device)
    dist.init_process_group(
        backend="nccl",  # Use NCCL backend for distributed GPU training
        init_method="tcp://127.0.0.1:12345",
        world_size=world_size,
        rank=rank,
    )
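    # Note: the hard-coded loopback address and port assume single-machine
    # training; any free local port will do.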

    # Pin the graph and features in-place so that the GPU can access them
    # directly (zero-copy) without replicating them on every device.
    graph = dataset.graph.pin_memory_()
    features = dataset.feature.pin_memory_()
    train_set = dataset.tasks[0].train_set
    valid_set = dataset.tasks[0].validation_set
    num_classes = dataset.tasks[0].metadata["num_classes"]

    in_size = features.size("node", None, "feat")[0]
    hidden_size = 256
    out_size = num_classes

    # Create GraphSAGE model. It should be copied onto a GPU as a replica.
    model = SAGE(in_size, hidden_size, out_size).to(device)
    model = DDP(model)

    # Model training.
    if rank == 0:
        print("Training...")
    train(
        rank,
        graph,
        features,
        train_set,
        valid_set,
        num_classes,
        model,
        device,
    )

    # Test the model.
    if rank == 0:
        print("Testing...")
    test_set = dataset.tasks[0].test_set
    test_acc, num_test_items = evaluate(
        rank,
        model,
        graph,
        features,
        itemset=test_set,
        num_classes=num_classes,
        device=device,
    )
    test_acc = weighted_reduce(test_acc * num_test_items, num_test_items)

    if rank == 0:
        print(f"Test Accuracy {test_acc.item():.4f}")


######################################################################
# Spawning Trainer Processes
# --------------------------
#
# The following code spawns a process for each GPU and calls the ``run``
# function defined above.
#


def main():
    if not torch.cuda.is_available():
        print("No GPU found!")
        return

    devices = [
        torch.device(f"cuda:{i}") for i in range(torch.cuda.device_count())
    ]
    world_size = len(devices)

    print(f"Training with {world_size} gpus.")

    # Load and preprocess dataset.
    dataset = gb.BuiltinDataset("ogbn-arxiv").load()

    # Limit the number of OpenMP threads per process to avoid resource
    # contention.
    os.environ["OMP_NUM_THREADS"] = str(mp.cpu_count() // 2 // world_size)

    mp.set_sharing_strategy("file_system")
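    # The "file_system" sharing strategy avoids exhausting open file
    # descriptors when tensors are shared with the spawned processes.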
    mp.spawn(
        run,
        args=(world_size, devices, dataset),
        nprocs=world_size,
        join=True,
    )


if __name__ == "__main__":
    main()