#!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# run the benchmark under timeit (-t), cProfile (-c), line_profiler (-l)
#
# usage:
# ./unflatten_bench.py -t
# ./unflatten_bench.py -c
# kernprof -l unflatten_bench.py -l; python -m line_profiler unflatten_bench.py.lprof

import argparse
import gc
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder

from apex_C import flatten as flatten_apex
from apex_C import unflatten as unflatten_apex

util_ops = UtilsBuilder().load()
flatten = util_ops.flatten
unflatten = util_ops.unflatten
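# util_ops.flatten/unflatten mirror torch._utils' _flatten_dense_tensors /
# _unflatten_dense_tensors: flatten copies a list of tensors into one contiguous
# 1-D buffer, unflatten returns views of that buffer with the original shapes.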

torch.manual_seed(0)
# emulate the weights of a small, typical model
x = [
    torch.rand((512, 512)).to(get_accelerator().device_name()),
    torch.rand((512, 1024)).to(get_accelerator().device_name()),
    torch.rand((512, 30000)).to(get_accelerator().device_name())
]
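# repeat the list 30x: each benchmark call unflattens 90 tensor entries
# (references to the same 3 storages)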
unflat_t = x * 30

# warm up and check that the same output is produced
flat_py = _flatten_dense_tensors(unflat_t)
flat_cpp = flatten(unflat_t)
flat_apex = flatten_apex(unflat_t)
#numel = flat_cpp.numel()
assert torch.eq(flat_py, flat_cpp).all(), "py and cpp flatten must produce the same tensor"
assert torch.eq(flat_py, flat_apex).all(), "py and apex flatten must produce the same tensor"

flat_t = flat_py
unflat_py = _unflatten_dense_tensors(flat_py, unflat_t)
for i in range(len(unflat_t)):
    assert torch.eq(unflat_t[i], unflat_py[i]).all()
unflat_cpp = unflatten(flat_cpp, unflat_t)
for i in range(len(unflat_t)):
    assert torch.eq(unflat_t[i], unflat_cpp[i]).all()
unflat_apex = unflatten_apex(flat_apex, unflat_t)
for i in range(len(unflat_t)):
    assert torch.eq(unflat_t[i], unflat_apex[i]).all()


# the programs being tested
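# each candidate performs 1000 unflatten calls on the same flat buffer;
# the results are discarded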
def py():
    for i in range(1000):
        unflat = _unflatten_dense_tensors(flat_t, unflat_t)


def cpp():
    for i in range(1000):
        unflat = unflatten(flat_t, unflat_t)


def apex():
    for i in range(1000):
        unflat = unflatten_apex(flat_t, unflat_t)


#### cProfile ####

import cProfile


def cprofileme():
    print("--------------- cProfile -----------------")
    print("py")
    cProfile.run("py()", sort=-1)
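    # drop Python references and the accelerator's cached blocks so one
    # run's allocations don't skew the next measurement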
    gc.collect()
    get_accelerator().empty_cache()
    print("cpp")
    cProfile.run("cpp()", sort=-1)
    gc.collect()
    get_accelerator().empty_cache()
    print("apex")
    cProfile.run("apex()", sort=-1)
    gc.collect()
    get_accelerator().empty_cache()


#### timeit ####

import timeit


def timeme():
    print("--------------- timeit -----------------")
    print(f'py  ={timeit.Timer("py()", globals=globals()).timeit(number=1)}')
    gc.collect()
    get_accelerator().empty_cache()
    print(f'cpp ={timeit.Timer("cpp()", globals=globals()).timeit(number=1)}')
    gc.collect()
    get_accelerator().empty_cache()
    print(f'apex={timeit.Timer("apex()", globals=globals()).timeit(number=1)}')
    gc.collect()
    get_accelerator().empty_cache()


#### line_profiler ####
# this one requires a special way to be called
# pip install line_profiler
# kernprof -l unflatten_bench.py -l; python -m line_profiler unflatten_bench.py.lprof


def line_profileme():
    print("--------------- line_profier -----------------")
    print("py")
    profile(py)()  # noqa: F821
    gc.collect()
    get_accelerator().empty_cache()
    print("cpp")
    profile(cpp)()  # noqa: F821
    gc.collect()
    get_accelerator().empty_cache()
    print("apex")
    profile(apex)()  # noqa: F821
    gc.collect()
    get_accelerator().empty_cache()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", action='store_true', help="profile with line_profiler (run via kernprof -l)")
    parser.add_argument("-c", action='store_true', help="profile with cProfile")
    parser.add_argument("-t", action='store_true', help="benchmark with timeit")
    args = parser.parse_args()
    if args.l:
        line_profileme()
    elif args.c:
        cprofileme()
    elif args.t:
        timeme()