---
# SuperBench Config
# Top-level schema: `version` plus a `superbench` section holding monitor
# settings, reusable anchors under `var`, and the benchmark definitions.
version: v0.3
superbench:
  enable: null
  monitor:
    enable: false
    sample_duration: 1
    sample_interval: 10
  # Reusable templates, merged into benchmarks below via `<<:` merge keys.
  var:
    # Per-GPU local execution: one process per device, pinned via prefix.
    default_local_mode: &default_local_mode
      enable: true
      modes:
        - name: local
          proc_num: 8
          prefix: CUDA_VISIBLE_DEVICES={proc_rank}
          parallel: true
    # torch.distributed launch across 8 processes on a single node.
    default_pytorch_mode: &default_pytorch_mode
      enable: true
      modes:
        - name: torch.distributed
          proc_num: 8
          node_num: 1
      frameworks:
        - pytorch
    # Shared training-benchmark parameters for all model workloads.
    common_model_config: &common_model_config
      duration: 0
      num_warmup: 16
      num_steps: 128
      precision:
        - float32
        - float16
      model_action:
        - train
  benchmarks:
    nccl-bw:
      enable: true
      modes:
        - name: local
          proc_num: 1
          parallel: false
      parameters:
        ngpus: 8
    ib-loopback:
      enable: true
      # Two parallel sweeps covering even- and odd-numbered IB devices.
      modes:
        - name: local
          proc_num: 4
          prefix: PROC_RANK={proc_rank} IB_DEVICES=0,2,4,6 NUMA_NODES=1,0,3,2
          parallel: true
        - name: local
          proc_num: 4
          prefix: PROC_RANK={proc_rank} IB_DEVICES=1,3,5,7 NUMA_NODES=1,0,3,2
          parallel: true
    disk-benchmark:
      enable: false
      modes:
        - name: local
          proc_num: 1
          parallel: false
      parameters:
        block_devices:
          - /dev/nvme0n1
    mem-bw:
      enable: true
      modes:
        - name: local
          proc_num: 8
          # Each rank binds to one GPU and the NUMA node rank/2.
          prefix: CUDA_VISIBLE_DEVICES={proc_rank} numactl -N $(({proc_rank}/2))
          parallel: false
    gpu-copy-bw:
      enable: true
      modes:
        - name: local
          parallel: false
      parameters:
        mem_type:
          - htod
          - dtoh
          - dtod
        copy_type:
          - sm
          - dma
    kernel-launch:
      <<: *default_local_mode
    gemm-flops:
      <<: *default_local_mode
    cudnn-function:
      <<: *default_local_mode
    cublas-function:
      <<: *default_local_mode
    matmul:
      <<: *default_local_mode
      frameworks:
        - pytorch
    sharding-matmul:
      <<: *default_pytorch_mode
    computation-communication-overlap:
      <<: *default_pytorch_mode
    gpt_models:
      <<: *default_pytorch_mode
      models:
        - gpt2-small
        - gpt2-large
      parameters:
        <<: *common_model_config
        batch_size: 4
    bert_models:
      <<: *default_pytorch_mode
      models:
        - bert-base
        - bert-large
      parameters:
        <<: *common_model_config
        batch_size: 8
    lstm_models:
      <<: *default_pytorch_mode
      models:
        - lstm
      parameters:
        <<: *common_model_config
        batch_size: 128
    resnet_models:
      <<: *default_pytorch_mode
      models:
        - resnet50
        - resnet101
        - resnet152
      parameters:
        <<: *common_model_config
        batch_size: 128
    densenet_models:
      <<: *default_pytorch_mode
      models:
        - densenet169
        - densenet201
      parameters:
        <<: *common_model_config
        batch_size: 128
    vgg_models:
      <<: *default_pytorch_mode
      models:
        - vgg11
        - vgg13
        - vgg16
        - vgg19
      parameters:
        <<: *common_model_config
        batch_size: 128