"tests/test_fp16_optimizer/configs/vit_2d.py" did not exist on "2ebaefc54268c51b0d0d915adf9ff044bdd8676c"
autochunk_benchmark.py 2.46 KB
Newer Older
oahzxl's avatar
oahzxl committed
1
2
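"""Benchmark AutoChunk on an Evoformer model: compare the latency and peak
CUDA memory of the baseline model against its chunk-codegen version."""
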
import time

import torch
import torch.fx

from chunk_codegen import ChunkCodeGen
from colossalai.fx import ColoTracer
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.fx.profiler import MetaTensor
from evoformer.evoformer import evoformer_base


def _benchmark_evoformer(model: torch.nn.Module, node, pair, title):
    torch.cuda.reset_peak_memory_stats()
    # memory already allocated before the run; subtracted later so the
    # report shows only this benchmark's peak usage
    now_mem = torch.cuda.memory_allocated() / 1024**2

    loop = 16
    with torch.no_grad():
        # warm-up iterations, excluded from timing
        for _ in range(loop // 4):
            model(node, pair)
        torch.cuda.synchronize()
        time1 = time.time()
        # timed iterations
        for _ in range(loop):
            model(node, pair)
        torch.cuda.synchronize()
        time2 = time.time()

    new_max_mem = torch.cuda.max_memory_allocated() / 1024**2
    print(
        "%s: time %.4fs, mem %dMB"
        % (title, (time2 - time1) / loop, new_max_mem - now_mem)
    )


def benchmark_evoformer():
    # init data and model
    msa_len = 300
    pair_len = 800
    # MSA ("node") and pair representations with the channel sizes expected
    # by the Evoformer base model
    node = torch.randn(1, msa_len, pair_len, 256).cuda()
    pair = torch.randn(1, pair_len, pair_len, 128).cuda()
    model = evoformer_base().cuda()

    # build autochunk model
    max_memory = 3000  # MB
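    # this budget is handed to ChunkCodeGen, which searches for a chunked
    # execution plan whose estimated peak memory stays under it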
    autochunk = _build_autochunk(model, max_memory, node, pair)

    # benchmark
    _benchmark_evoformer(model, node, pair, "openfold")
    _benchmark_evoformer(autochunk, node, pair, "autochunk")
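
    # optional sanity check (a sketch, assuming the model returns a tuple of
    # tensors such as (node, pair)): chunked execution should match baseline
    with torch.no_grad():
        out_base = model(node, pair)
        out_chunk = autochunk(node, pair)
        for a, b in zip(out_base, out_chunk):
            assert torch.allclose(a, b, atol=1e-4), "autochunk output mismatch"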


def _build_autochunk(model, max_memory, node, pair):
    # trace the module on meta tensors (no real GPU memory is allocated),
    # then replace its codegen below
    graph = ColoTracer().trace(
        model,
        meta_args={
            "node": node.to(torch.device("meta")),
            "pair": pair.to(torch.device("meta")),
        },
    )
    # meta info propagation must run on a module built with symbolic_trace
    gm_prop = torch.fx.symbolic_trace(model)
    interp = MetaInfoProp(gm_prop)
    interp.propagate(
        MetaTensor(node, fake_device="cuda:0"), MetaTensor(pair, fake_device="cuda:0")
    )
    # propagate once more on the ColoTracer graph so that graph module also
    # carries meta info (not strictly necessary)
    gm = torch.fx.GraphModule(model, graph)
    interp = MetaInfoProp(gm)
    interp.propagate(
        MetaTensor(node, fake_device="cuda:0"), MetaTensor(pair, fake_device="cuda:0")
    )
    # replace the graph's codegen with ChunkCodeGen and recompile
    codegen = ChunkCodeGen(gm_prop, max_memory)
    graph.set_codegen(codegen)
    gm = ColoGraphModule(model, graph)
    gm.recompile()
    # print the generated forward source for inspection
    code = graph.python_code("self").src
    print(code)
    return gm


if __name__ == "__main__":
    benchmark_evoformer()