# gpu_affinity.py
import collections
import itertools
import os
import pathlib
import re
from enum import Enum, auto
from typing import Union

import pynvml


class Device:
    # assume nvml returns list of 64 bit ints
    _nvml_bit_affinity = 64

    _nvml_affinity_elements = (
        os.cpu_count() + _nvml_bit_affinity - 1
    ) // _nvml_bit_affinity

    def __init__(self, device_idx):
        super().__init__()
        self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)

    def get_name(self):
        return pynvml.nvmlDeviceGetName(self.handle)

    def get_uuid(self):
        return pynvml.nvmlDeviceGetUUID(self.handle)

    def get_cpu_affinity(self):
        affinity_string = ""
        for j in pynvml.nvmlDeviceGetCpuAffinity(
            self.handle, Device._nvml_affinity_elements
        ):
            # assume nvml returns list of 64 bit ints
            affinity_string = "{:064b}".format(j) + affinity_string

        affinity_list = [int(x) for x in affinity_string]
        affinity_list.reverse()  # so core 0 is in 0th element of list

        ret = [i for i, e in enumerate(affinity_list) if e != 0]
        return ret
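
# A minimal sketch of how get_cpu_affinity() decodes the NVML bitmask,
# using a hypothetical 2-element mask (values chosen for illustration):
#
#     mask = [0b1010, 0b1]                  # element 0 covers cores 0-63
#     bits = "{:064b}".format(mask[1]) + "{:064b}".format(mask[0])
#     [i for i, b in enumerate(reversed(bits)) if b == "1"]  # -> [1, 3, 64]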


def get_thread_siblings_list():
    """
    Returns a list of 2-element integer tuples representing pairs of
    hyperthreading cores.
    """
    path = pathlib.Path("/sys/devices/system/cpu")
    thread_siblings_list = []
    pattern = re.compile(r"(\d+)\D(\d+)")
    for fname in path.glob("cpu*/topology/thread_siblings_list"):
        with open(fname) as f:
            content = f.read().strip()
            res = pattern.findall(content)
            if res:
                pair = tuple(sorted(map(int, res[0])))
                thread_siblings_list.append(pair)
    thread_siblings_list = list(set(thread_siblings_list))
    return thread_siblings_list
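
# On a typical 2-way SMT machine each sysfs file holds a sibling pair such
# as "0,64" (or "0-1", depending on the kernel), so the result is a list of
# hypothetical pairs like [(0, 64), (1, 65), ...] (order not guaranteed,
# since duplicates are removed via a set).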


def build_thread_siblings_dict(siblings_list):
    siblings_dict = {}
    for siblings_tuple in siblings_list:
        for core in siblings_tuple:
            siblings_dict[core] = siblings_tuple

    return siblings_dict


def group_list_by_dict(affinity, siblings_dict):
    sorted_affinity = sorted(affinity, key=lambda x: siblings_dict.get(x, (x,)))
    grouped = itertools.groupby(
        sorted_affinity, key=lambda x: siblings_dict.get(x, (x,))
    )
    grouped_affinity = []
    for key, group in grouped:
        grouped_affinity.append(tuple(group))
    return grouped_affinity
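
# A sketch with hypothetical values: given affinity [0, 1, 64, 65] and
# siblings_dict {0: (0, 64), 64: (0, 64), 1: (1, 65), 65: (1, 65)},
# group_list_by_dict() returns [(0, 64), (1, 65)] - one tuple per physical
# core, with hyperthread siblings grouped together.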


def group_affinity_by_siblings(socket_affinities):
    siblings_list = get_thread_siblings_list()
    siblings_dict = build_thread_siblings_dict(siblings_list)

    grouped_socket_affinities = []

    for socket_affinity in socket_affinities:
        grouped_socket_affinities.append(
            group_list_by_dict(socket_affinity, siblings_dict)
        )
    return grouped_socket_affinities


def ungroup_affinities(affinities, cores):
    ungrouped_affinities = []

    for affinity in affinities:
        if cores == "all_logical":
            ungrouped_affinities.append(list(itertools.chain(*affinity)))
        elif cores == "single_logical":
            ungrouped_affinities.append([group[0] for group in affinity])
        else:
            raise RuntimeError(f"Unknown cores mode: {cores!r}")
    return ungrouped_affinities
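
# A sketch with hypothetical values: for grouped affinities
# [[(0, 64), (1, 65)]], 'all_logical' yields [[0, 64, 1, 65]] (all
# hyperthreads kept), while 'single_logical' yields [[0, 1]] (only the
# first logical core of each physical core).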


def check_socket_affinities(socket_affinities):
    # sets of cores should be either identical or disjoint
    for i, j in itertools.product(socket_affinities, socket_affinities):
        if not set(i) == set(j) and not set(i).isdisjoint(set(j)):
            raise RuntimeError(
                f"Sets of cores should be either identical or disjoint, "
                f"but got {i} and {j}."
            )
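
# A sketch with hypothetical values: [[0, 1], [0, 1]] passes (identical -
# two GPUs attached to the same socket), as does [[0, 1], [2, 3]]
# (disjoint sockets), but [[0, 1], [1, 2]] raises, because partially
# overlapping affinities cannot be partitioned cleanly between GPUs.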


def get_socket_affinities(nproc_per_node, exclude_unavailable_cores=True):
    devices = [Device(i) for i in range(nproc_per_node)]
    socket_affinities = [dev.get_cpu_affinity() for dev in devices]

    if exclude_unavailable_cores:
        available_cores = os.sched_getaffinity(0)
        socket_affinities = [
            list(set(affinity) & available_cores) for affinity in socket_affinities
        ]

    check_socket_affinities(socket_affinities)

    return socket_affinities


def get_grouped_socket_affinities(nproc_per_node, exclude_unavailable_cores=True):
    socket_affinities = get_socket_affinities(nproc_per_node, exclude_unavailable_cores)
    grouped_socket_affinities = group_affinity_by_siblings(socket_affinities)
    return grouped_socket_affinities


def set_socket_affinity(gpu_id, nproc_per_node, cores):
    """
    The process is assigned with all available physical CPU cores from the CPU
    socket connected to the GPU with a given id.

    Args:
        gpu_id: index of a GPU
        nproc_per_node: number of processes per node
        cores: 'all_logical' or 'single_logical'
    """
    grouped_socket_affinities = get_grouped_socket_affinities(nproc_per_node)
    ungrouped_affinities = ungroup_affinities(grouped_socket_affinities, cores)
    os.sched_setaffinity(0, ungrouped_affinities[gpu_id])


def set_socket_single_affinity(gpu_id, nproc_per_node, cores):
    """
    The process is assigned with the first available physical CPU core from the
    list of all CPU physical cores from the CPU socket connected to the GPU with
    a given id.

    Args:
        gpu_id: index of a GPU
        nproc_per_node: number of processes per node
        cores: 'all_logical' or 'single_logical'
    """
    grouped_socket_affinities = get_grouped_socket_affinities(nproc_per_node)
    single_grouped_socket_affinities = [
        affinity[:1] for affinity in grouped_socket_affinities
    ]
    ungrouped_affinities = ungroup_affinities(single_grouped_socket_affinities, cores)
    os.sched_setaffinity(0, ungrouped_affinities[gpu_id])


def set_socket_single_unique_affinity(gpu_id, nproc_per_node, cores):
    """
    The process is assigned with a single unique available physical CPU core
    from the list of all CPU cores from the CPU socket connected to the GPU with
    a given id.

    Args:
        gpu_id: index of a GPU
        nproc_per_node: number of processes per node
        cores: 'all_logical' or 'single_logical'
    """
    grouped_socket_affinities = get_grouped_socket_affinities(nproc_per_node)

    affinities = []
    assigned_groups = set()

    for grouped_socket_affinity in grouped_socket_affinities:
        for group in grouped_socket_affinity:
            if group not in assigned_groups:
                affinities.append([group])
                assigned_groups.add(group)
                break

    ungrouped_affinities = ungroup_affinities(affinities, cores)

    os.sched_setaffinity(0, ungrouped_affinities[gpu_id])


def set_socket_unique_affinity(gpu_id, nproc_per_node, cores, mode, balanced=True):
    """
    The process is assigned with a unique subset of available physical CPU
    cores from the CPU socket connected to a GPU with a given id.
    Assignment automatically includes hyperthreading siblings (if siblings are
    available).

    Args:
        gpu_id: index of a GPU
        nproc_per_node: number of processes per node
        cores: 'all_logical' or 'single_logical'
        mode: 'contiguous' or 'interleaved'
        balanced: assign an equal number of physical cores to each process,
    """
    grouped_socket_affinities = get_grouped_socket_affinities(nproc_per_node)

    grouped_socket_affinities_to_device_ids = collections.defaultdict(list)

    for idx, grouped_socket_affinity in enumerate(grouped_socket_affinities):
        grouped_socket_affinities_to_device_ids[tuple(grouped_socket_affinity)].append(
            idx
        )

    # compute minimal number of physical cores per GPU across all GPUs and
    # sockets, code assigns this number of cores per GPU if balanced == True
    min_physical_cores_per_gpu = min(
        [
            len(cores) // len(gpus)
            for cores, gpus in grouped_socket_affinities_to_device_ids.items()
        ]
    )

    grouped_unique_affinities = [None] * nproc_per_node

    for (
        grouped_socket_affinity,
        device_ids,
    ) in grouped_socket_affinities_to_device_ids.items():
        devices_per_group = len(device_ids)
        if balanced:
            cores_per_device = min_physical_cores_per_gpu
            grouped_socket_affinity = grouped_socket_affinity[
                : devices_per_group * min_physical_cores_per_gpu
            ]
        else:
            cores_per_device = len(grouped_socket_affinity) // devices_per_group

        for socket_subgroup_id, device_id in enumerate(device_ids):
            # In theory there should be no difference in performance between
            # the 'interleaved' and 'contiguous' patterns on Intel-based
            # DGX-1, but 'contiguous' should be better for DGX A100 because
            # on AMD Rome 4 consecutive cores share an L3 cache.
            # TODO: the code doesn't attempt to detect the L3 cache layout
            # automatically; also, the external environment may already
            # exclude some cores, and this code makes no attempt to detect
            # that or to align the mapping to multiples of 4.

            if mode == "interleaved":
                unique_grouped_affinity = list(
                    grouped_socket_affinity[socket_subgroup_id::devices_per_group]
                )
            elif mode == "contiguous":
                unique_grouped_affinity = list(
                    grouped_socket_affinity[
                        socket_subgroup_id
                        * cores_per_device : (socket_subgroup_id + 1)
                        * cores_per_device
                    ]
                )
            else:
                raise RuntimeError(
                    f"Unknown set_socket_unique_affinity mode: {mode!r}"
                )

            grouped_unique_affinities[device_id] = unique_grouped_affinity

    ungrouped_affinities = ungroup_affinities(grouped_unique_affinities, cores)
    os.sched_setaffinity(0, ungrouped_affinities[gpu_id])
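
# A sketch with hypothetical values: with 8 physical core groups shared by
# 2 GPUs, 'contiguous' gives GPU 0 groups 0-3 and GPU 1 groups 4-7, while
# 'interleaved' gives GPU 0 groups 0, 2, 4, 6 and GPU 1 groups 1, 3, 5, 7.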


class AffinityMode(Enum):
    none = auto()
    socket = auto()
    socket_single = auto()
    socket_single_unique = auto()
    socket_unique_interleaved = auto()
    socket_unique_contiguous = auto()


def set_affinity(
    gpu_id,
    nproc_per_node=None,
    *,
    mode: Union[str, AffinityMode] = AffinityMode.socket_unique_contiguous,
    cores="all_logical",
    balanced=True,
):
    """
    Sets the CPU affinity of the process so that it matches the CPU-GPU
    hardware architecture of the given platform. Usually this improves and
    stabilizes the performance of deep learning training workloads.

    This function assumes that the workload runs in multi-process single-device
    mode (there are multiple training processes, and each process is running on
    a single GPU). This is typical for multi-GPU data-parallel training
    workloads (e.g., using `torch.nn.parallel.DistributedDataParallel`).

    Available affinity modes:
    * 'socket' - the process is assigned all available physical CPU cores
    from the CPU socket connected to the GPU with the given id.
    * 'socket_single' - the process is assigned the first available physical
    CPU core from the list of all CPU cores from the CPU socket connected to
    the GPU with the given id (multiple GPUs may be assigned the same CPU
    core).
    * 'socket_single_unique' - the process is assigned a single unique
    available physical CPU core from the list of all CPU cores from the CPU
    socket connected to the GPU with the given id.
    * 'socket_unique_interleaved' - the process is assigned a unique subset
    of available physical CPU cores from the CPU socket connected to the GPU
    with the given id; cores are assigned with an interleaved indexing
    pattern.
    * 'socket_unique_contiguous' - (the default) the process is assigned a
    unique subset of available physical CPU cores from the CPU socket
    connected to the GPU with the given id; cores are assigned with a
    contiguous indexing pattern.

    Available "cores" modes:
    * 'all_logical' - assigns the process with all logical cores associated with
    a given corresponding physical core (i.e., automatically includes all
    available hyperthreading siblings)
    * 'single_logical' - assigns the process with only one logical core
    associated with a given corresponding physical core (i.e., excludes
    hyperthreading siblings)

    'socket_unique_contiguous' is the recommended mode for deep learning
    training workloads on NVIDIA DGX machines.

    Args:
        gpu_id: integer index of a GPU, value from 0 to 'nproc_per_node' - 1
        nproc_per_node: number of processes per node
        mode: affinity mode
        cores: 'all_logical' or 'single_logical'
        balanced: assign an equal number of physical cores to each process;
            affects only the 'socket_unique_interleaved' and
            'socket_unique_contiguous' affinity modes

    Returns a set of logical CPU cores on which the process is eligible to run.

    Example:

    import argparse
    import os

    import gpu_affinity
    import torch


    def main():
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--local_rank',
            type=int,
            default=os.getenv('LOCAL_RANK', 0),
        )
        args = parser.parse_args()

        nproc_per_node = torch.cuda.device_count()

        affinity = gpu_affinity.set_affinity(args.local_rank, nproc_per_node)
        print(f'{args.local_rank}: core affinity: {affinity}')


    if __name__ == "__main__":
        main()

    Launch the example with:
    python -m torch.distributed.launch --nproc_per_node <#GPUs> example.py


    WARNING: On DGX A100, only half of the CPU cores have direct access to GPUs.
    This function restricts execution only to the CPU cores directly connected
    to GPUs, so on DGX A100, it will limit the code to half of the CPU cores and
    half of CPU memory bandwidth (which may be fine for many DL models).

    WARNING: Intel's OpenMP implementation resets affinity on the first call
    to an OpenMP function after a fork. It's recommended to run with the
    environment variable `KMP_AFFINITY=disabled` if the affinity set by
    gpu_affinity should be preserved after a fork (e.g., in PyTorch
    DataLoader workers).
    """
    if not isinstance(mode, AffinityMode):
        mode = AffinityMode[mode]

    pynvml.nvmlInit()
    if nproc_per_node is None:
        nproc_per_node = pynvml.nvmlDeviceGetCount()

    if mode == AffinityMode.none:
        pass
    elif mode == AffinityMode.socket:
        set_socket_affinity(gpu_id, nproc_per_node, cores)
    elif mode == AffinityMode.socket_single:
        set_socket_single_affinity(gpu_id, nproc_per_node, cores)
    elif mode == AffinityMode.socket_single_unique:
        set_socket_single_unique_affinity(gpu_id, nproc_per_node, cores)
    elif mode == AffinityMode.socket_unique_interleaved:
        set_socket_unique_affinity(
            gpu_id, nproc_per_node, cores, "interleaved", balanced
        )
    elif mode == AffinityMode.socket_unique_contiguous:
        set_socket_unique_affinity(
            gpu_id, nproc_per_node, cores, "contiguous", balanced
        )
    else:
        raise RuntimeError(f"Unknown affinity mode: {mode!r}")

    affinity = os.sched_getaffinity(0)
    return affinity
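
# A minimal sketch of calling set_affinity() directly from a single
# process (assumes pynvml is installed and at least one GPU is visible):
#
#     import gpu_affinity
#     affinity = gpu_affinity.set_affinity(gpu_id=0, nproc_per_node=1)
#     print(f"core affinity: {affinity}")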