# SuperBench Config
---
version: v0.12
superbench:
  enable: null  # null = run every enabled benchmark below
  monitor:
    enable: true
    sample_duration: 1
    sample_interval: 10
  # Reusable anchors merged into individual benchmarks via `<<:` below.
  var:
    default_local_mode: &default_local_mode
      enable: true
      modes:
        - name: local
          proc_num: 8
          prefix: CUDA_VISIBLE_DEVICES={proc_rank}
          parallel: true
    default_pytorch_mode: &default_pytorch_mode
      enable: true
      modes:
        - name: torch.distributed
          proc_num: 8
          node_num: 1
      frameworks:
        - pytorch
    common_model_config: &common_model_config
      duration: 0
      num_warmup: 16
      num_steps: 128
      batch_size: 1
      precision:
        - float32
        - float16
      model_action:
        - train
  benchmarks:
    # GPU stress test: 300 s burn with FP64 and tensor-core workloads.
    gpu-burn:
      enable: true
      modes:
        - name: local
          proc_num: 1
          parallel: false
      parameters:
        time: 300
        doubles: true
        tensor_core: true
    # NCCL all-reduce bandwidth with default NCCL settings.
    nccl-bw:default:
      enable: true
      modes:
        - name: local
          proc_num: 1
          parallel: false
      parameters:
        ngpus: 8
    # NCCL bandwidth with P2P/SHM disabled so traffic is forced over IB
    # with GPUDirect RDMA only (env values quoted — NCCL expects strings).
    nccl-bw:gdr-only:
      enable: true
      modes:
        - name: local
          proc_num: 1
          parallel: false
          env:
            NCCL_IB_PCI_RELAXED_ORDERING: '1'
            NCCL_NET_GDR_LEVEL: '5'
            NCCL_P2P_DISABLE: '1'
            NCCL_SHM_DISABLE: '1'
            NCCL_MIN_NCHANNELS: '16'
            NCCL_IB_DISABLE: '0'
      parameters:
        ngpus: 8
    # IB loopback bandwidth, run in two waves over even/odd HCA devices.
    ib-loopback:
      enable: true
      modes:
        - name: local
          proc_num: 4
          prefix: PROC_RANK={proc_rank} IB_DEVICES=0,2,4,6 NUMA_NODES=1,0,3,2
          parallel: true
        - name: local
          proc_num: 4
          prefix: PROC_RANK={proc_rank} IB_DEVICES=1,3,5,7 NUMA_NODES=1,0,3,2
          parallel: true
    # Raw block-device benchmark; disabled by default (destructive to the
    # listed devices — enable deliberately).
    disk-benchmark:
      enable: false
      modes:
        - name: local
          proc_num: 1
          parallel: false
      parameters:
        block_devices:
          - /dev/nvme0n1
    # Host memory bandwidth/latency matrices; disabled by default.
    cpu-memory-bw-latency:
      enable: false
      modes:
        - name: local
          proc_num: 1
          parallel: false
      parameters:
        tests:
          - bandwidth_matrix
          - latency_matrix
          - max_bandwidth
    # Device memory bandwidth; each rank is pinned to the NUMA node next to
    # its GPU (proc_rank/2) and ranks run sequentially.
    mem-bw:
      enable: true
      modes:
        - name: local
          proc_num: 8
          prefix: CUDA_VISIBLE_DEVICES={proc_rank} numactl -N $(({proc_rank}/2))
          parallel: false
    # Small single-loop copy with data verification (correctness pass).
    gpu-copy-bw:correctness:
      enable: true
      modes:
        - name: local
          parallel: false
      parameters:
        mem_type:
          - htod
          - dtoh
          - dtod
        copy_type:
          - sm
          - dma
        size: 4096
        num_warm_up: 0
        num_loops: 1
        check_data: true
    # Same copy paths at default sizes for bandwidth measurement.
    gpu-copy-bw:perf:
      enable: true
      modes:
        - name: local
          parallel: false
      parameters:
        mem_type:
          - htod
          - dtoh
          - dtod
        copy_type:
          - sm
          - dma
    # NVIDIA nvbandwidth host<->device memcpy tests (CE and SM engines).
    nvbandwidth:
      enable: true
      modes:
        - name: local
          parallel: false
      parameters:
        buffer_size: 128
        test_cases:
          - host_to_device_memcpy_ce
          - device_to_host_memcpy_ce
          - host_to_device_memcpy_sm
          - device_to_host_memcpy_sm
        num_loops: 18
        skip_verification: false
        disable_affinity: false
        use_mean: false
    # Micro-benchmarks that only need the shared local-mode settings.
    kernel-launch:
      <<: *default_local_mode
    gemm-flops:
      <<: *default_local_mode
    cudnn-function:
      <<: *default_local_mode
    cublas-function:
      <<: *default_local_mode
    matmul:
      <<: *default_local_mode
      frameworks:
        - pytorch
    sharding-matmul:
      <<: *default_pytorch_mode
    computation-communication-overlap:
      <<: *default_pytorch_mode
    # Cross-node IB traffic test; device selection is resolved per rank from
    # $LOCAL_RANK at runtime. Disabled by default (needs MPI across nodes).
    ib-traffic:
      enable: false
      modes:
        - name: mpi
          proc_num: 8
      parameters:
        msg_size: 8388608
        ib_dev: mlx5_$LOCAL_RANK
        gpu_dev: $LOCAL_RANK
        numa_dev: $((LOCAL_RANK/2))
    # GPCNet network tests over MPI/UCX; disabled by default.
    gpcnet-network-test:
      enable: false
      modes:
        - name: mpi
          proc_num: 1
          mca:
            pml: ucx
            btl: ^uct
            btl_tcp_if_include: eth0
          env:
            UCX_NET_DEVICES: mlx5_0:1
    gpcnet-network-load-test:
      enable: false
      modes:
        - name: mpi
          proc_num: 1
          mca:
            pml: ucx
            btl: ^uct
            btl_tcp_if_include: eth0
          env:
            UCX_NET_DEVICES: mlx5_0:1
    # SSH-port reachability check between nodes; disabled by default.
    tcp-connectivity:
      enable: false
      modes:
        - name: local
          parallel: false
      parameters:
        port: 22
    # ONNX Runtime inference using the shared local-mode settings.
    ort-inference:
      <<: *default_local_mode
      parameters:
        batch_size: 1
    # TensorRT INT8 inference over a fixed set of vision/NLP models.
    tensorrt-inference:
      <<: *default_local_mode
      parameters:
        pytorch_models:
          - resnet50
          - resnet101
          - resnet152
          - densenet169
          - densenet201
          - bert-base
          - bert-large
        seq_length: 224
        batch_size: 1
        precision: int8
    # Megatron-DeepSpeed GPT training across all nodes via MPI.
    megatron-gpt:
      modes:
        - name: mpi
          proc_num: 1
          node_num: all
      parameters:
        code_base: /opt/superbench/third_party/Megatron/Megatron-DeepSpeed/
        dataset_url: https://huggingface.co/datasets/suolyer/pile_bookcorpus2/raw/main/test.json
        batch_size: 2048
        num_warmup: 0
        num_steps: 10
        precision:
          - float16
          - bfloat16
        deepspeed: true
        sequence_parallel: true
        use_rotary_position_embeddings: true
    # PyTorch model-training benchmarks: each family merges the shared
    # distributed mode and the common training config via anchors.
    gpt_models:
      <<: *default_pytorch_mode
      models:
        - gpt2-small
        - gpt2-large
      parameters:
        <<: *common_model_config
    bert_models:
      <<: *default_pytorch_mode
      models:
        - bert-base
        - bert-large
      parameters:
        <<: *common_model_config
    lstm_models:
      <<: *default_pytorch_mode
      models:
        - lstm
      parameters:
        <<: *common_model_config
    resnet_models:
      <<: *default_pytorch_mode
      models:
        - resnet50
        - resnet101
        - resnet152
      parameters:
        <<: *common_model_config
    densenet_models:
      <<: *default_pytorch_mode
      models:
        - densenet169
        - densenet201
      parameters:
        <<: *common_model_config
    vgg_models:
      <<: *default_pytorch_mode
      models:
        - vgg11
        - vgg13
        - vgg16
        - vgg19
      parameters:
        <<: *common_model_config