# SuperBench Config
version: v0.2
superbench:
  enable: null
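  # Reusable defaults below are merged into individual benchmarks via YAML merge keys (<<: *anchor).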
  var:
    default_local_mode: &default_local_mode
      enable: true
      modes:
        - name: local
          proc_num: 8
          prefix: CUDA_VISIBLE_DEVICES={proc_rank}
          parallel: yes
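    # Distributed PyTorch mode: 8 processes launched through torch.distributed on a single node.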
    default_pytorch_mode: &default_pytorch_mode
      enable: true
      modes:
        - name: torch.distributed
          proc_num: 8
          node_num: 1
      frameworks:
        - pytorch
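    # Settings shared by all model benchmarks: 16 warm-up steps, 128 measured steps,
    # training in both float32 and float16.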
    common_model_config: &common_model_config
      duration: 0
      num_warmup: 16
      num_steps: 128
      precision:
        - float32
        - float16
      model_action:
        - train
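  # Benchmark definitions; each entry can override or extend the anchors above.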
  benchmarks:
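    # NCCL bandwidth on the local node; NCCL_DEBUG=INFO enables verbose NCCL logging
    # and NCCL_IB_DISABLE=1 keeps NCCL off the InfiniBand transport.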
    nccl-bw:
      enable: true
      modes:
        - name: local
          prefix: NCCL_DEBUG=INFO NCCL_IB_DISABLE=1
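    # InfiniBand loopback bandwidth: two groups of 4 processes cover the even- and
    # odd-numbered IB devices, each mapped to its matching NUMA node.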
    ib-loopback:
      enable: true
      modes:
        - name: local
          proc_num: 4
          prefix: PROC_RANK={proc_rank} IB_DEVICES=0,2,4,6 NUMA_NODES=1,0,3,2
          parallel: yes
        - name: local
          proc_num: 4
          prefix: PROC_RANK={proc_rank} IB_DEVICES=1,3,5,7 NUMA_NODES=1,0,3,2
          parallel: yes
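    # Disk benchmark is disabled by default; point block_devices at the devices to test.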
    disk-benchmark:
      enable: false
      modes:
        - proc_num: 1
          parallel: no
      parameters:
        block_devices:
          - /dev/nvme0n1
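    # Memory bandwidth: one process per GPU, with numactl binding each process to a
    # CPU node derived from its rank.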
    mem-bw:
      enable: true
      modes:
        - name: local
          proc_num: 8
          prefix: CUDA_VISIBLE_DEVICES={proc_rank} numactl -c $(({proc_rank}/2))
          parallel: yes
    gpu-sm-copy-bw:
      enable: false
      modes:
        - name: local
          proc_num: 32
          prefix: CUDA_VISIBLE_DEVICES=$(({proc_rank}%8)) numactl -N $(({proc_rank}%4)) -m $(({proc_rank}%4))
          parallel: no
      parameters:
        dtoh: true
        htod: true
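    # Micro-benchmarks that reuse the default local mode as-is (matmul additionally
    # selects the pytorch framework).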
    kernel-launch:
      <<: *default_local_mode
    gemm-flops:
      <<: *default_local_mode
    cudnn-function:
      <<: *default_local_mode
    cublas-function:
      <<: *default_local_mode
    matmul:
      <<: *default_local_mode
      frameworks:
        - pytorch
    sharding-matmul:
      <<: *default_pytorch_mode
    computation-communication-overlap:
      <<: *default_pytorch_mode
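    # Model benchmarks: each merges the distributed PyTorch mode and the common model
    # config, then sets a per-family batch size.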
    gpt_models:
      <<: *default_pytorch_mode
      models:
        - gpt2-small
        - gpt2-large
      parameters:
        <<: *common_model_config
        batch_size: 4
    bert_models:
      <<: *default_pytorch_mode
      models:
        - bert-base
        - bert-large
      parameters:
        <<: *common_model_config
        batch_size: 8
    lstm_models:
      <<: *default_pytorch_mode
      models:
        - lstm
      parameters:
        <<: *common_model_config
        batch_size: 128
    resnet_models:
      <<: *default_pytorch_mode
      models:
        - resnet50
        - resnet101
        - resnet152
      parameters:
        <<: *common_model_config
        batch_size: 128
    densenet_models:
      <<: *default_pytorch_mode
      models:
        - densenet169
        - densenet201
      parameters:
        <<: *common_model_config
        batch_size: 128
    vgg_models:
      <<: *default_pytorch_mode
      models:
        - vgg11
        - vgg13
        - vgg16
        - vgg19
      parameters:
        <<: *common_model_config
        batch_size: 128